From d370dbc10e984813a80e5f954f4965225e8ddbb5 Mon Sep 17 00:00:00 2001
From: HGuillemet
Date: Tue, 25 Jul 2023 15:20:35 +0200
Subject: [PATCH] * Refactor and improve presets for PyTorch (pull #1360)

---
 CHANGELOG.md | 1 +
 pytorch/README.md | 10 +-
 .../java/org/bytedeco/pytorch/ASMoutput.java | 4 +-
 .../pytorch/ActivityTraceWrapper.java | 26 +
 .../org/bytedeco/pytorch/ActivityTypeSet.java | 46 +
 .../java/org/bytedeco/pytorch/Adagrad.java | 12 +-
 .../org/bytedeco/pytorch/AdagradOptions.java | 9 +-
 .../bytedeco/pytorch/AdagradParamState.java | 9 +-
 .../gen/java/org/bytedeco/pytorch/Adam.java | 12 +-
 .../org/bytedeco/pytorch/AdamOptions.java | 9 +-
 .../org/bytedeco/pytorch/AdamParamState.java | 9 +-
 .../gen/java/org/bytedeco/pytorch/AdamW.java | 12 +-
 .../org/bytedeco/pytorch/AdamWOptions.java | 9 +-
 .../org/bytedeco/pytorch/AdamWParamState.java | 9 +-
 .../bytedeco/pytorch/AdaptiveAvgPool1d.java | 34 -
 .../pytorch/AdaptiveAvgPool1dImpl.java | 8 +-
 .../pytorch/AdaptiveAvgPool1dImplBase.java | 8 +-
 .../AdaptiveAvgPool1dImplCloneable.java | 14 +-
 .../AdaptiveAvgPool1dImplModuleHolder.java | 79 -
 .../pytorch/AdaptiveAvgPool1dOptions.java | 4 +-
 .../bytedeco/pytorch/AdaptiveAvgPool2d.java | 34 -
 .../pytorch/AdaptiveAvgPool2dImpl.java | 8 +-
 .../pytorch/AdaptiveAvgPool2dImplBase.java | 8 +-
 .../AdaptiveAvgPool2dImplCloneable.java | 14 +-
 .../AdaptiveAvgPool2dImplModuleHolder.java | 79 -
 .../pytorch/AdaptiveAvgPool2dOptions.java | 4 +-
 .../bytedeco/pytorch/AdaptiveAvgPool3d.java | 34 -
 .../pytorch/AdaptiveAvgPool3dImpl.java | 8 +-
 .../pytorch/AdaptiveAvgPool3dImplBase.java | 8 +-
 .../AdaptiveAvgPool3dImplCloneable.java | 14 +-
 .../AdaptiveAvgPool3dImplModuleHolder.java | 79 -
 .../pytorch/AdaptiveAvgPool3dOptions.java | 4 +-
 .../pytorch/AdaptiveLogSoftmaxWithLoss.java | 35 -
 .../AdaptiveLogSoftmaxWithLossImpl.java | 12 +-
 ...aptiveLogSoftmaxWithLossImplCloneable.java | 14 +-
 ...iveLogSoftmaxWithLossImplModuleHolder.java | 79 -
 .../AdaptiveLogSoftmaxWithLossOptions.java | 4 +-
 .../bytedeco/pytorch/AdaptiveMaxPool1d.java | 34 -
 .../pytorch/AdaptiveMaxPool1dImpl.java | 10 +-
 .../pytorch/AdaptiveMaxPool1dImplBase.java | 8 +-
 .../AdaptiveMaxPool1dImplCloneable.java | 14 +-
 .../AdaptiveMaxPool1dImplModuleHolder.java | 79 -
 .../pytorch/AdaptiveMaxPool1dOptions.java | 4 +-
 .../bytedeco/pytorch/AdaptiveMaxPool2d.java | 34 -
 .../pytorch/AdaptiveMaxPool2dImpl.java | 10 +-
 .../pytorch/AdaptiveMaxPool2dImplBase.java | 8 +-
 .../AdaptiveMaxPool2dImplCloneable.java | 14 +-
 .../AdaptiveMaxPool2dImplModuleHolder.java | 79 -
 .../pytorch/AdaptiveMaxPool2dOptions.java | 4 +-
 .../bytedeco/pytorch/AdaptiveMaxPool3d.java | 34 -
 .../pytorch/AdaptiveMaxPool3dImpl.java | 10 +-
 .../pytorch/AdaptiveMaxPool3dImplBase.java | 8 +-
 .../AdaptiveMaxPool3dImplCloneable.java | 14 +-
 .../AdaptiveMaxPool3dImplModuleHolder.java | 79 -
 .../pytorch/AdaptiveMaxPool3dOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/AliasDb.java | 4 +-
 .../java/org/bytedeco/pytorch/AliasInfo.java | 4 +-
 .../bytedeco/pytorch/AliasInfoOptional.java | 5 +-
 .../pytorch/AliasTypeSetOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/Allocator.java | 6 +-
 .../org/bytedeco/pytorch/AlphaDropout.java | 34 -
 .../pytorch/AlphaDropoutFuncOptions.java | 4 +-
 .../bytedeco/pytorch/AlphaDropoutImpl.java | 10 +-
 .../pytorch/AlphaDropoutImplBase.java | 10 +-
 .../pytorch/AlphaDropoutImplCloneable.java | 14 +-
 .../pytorch/AlphaDropoutImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/AnnotatedSchema.java | 33 -
 .../org/bytedeco/pytorch/AnomalyMetadata.java | 4 +-
 .../org/bytedeco/pytorch/AnomalyMode.java | 4 +-
 .../org/bytedeco/pytorch/AnyClassType.java | 4 +-
 .../org/bytedeco/pytorch/AnyClassTypePtr.java | 4 +-
 .../org/bytedeco/pytorch/AnyEnumType.java | 4 +-
 .../org/bytedeco/pytorch/AnyEnumTypePtr.java | 4 +-
 .../org/bytedeco/pytorch/AnyListType.java | 4 +-
 .../org/bytedeco/pytorch/AnyListTypePtr.java | 4 +-
 .../java/org/bytedeco/pytorch/AnyModule.java | 289 +-
 .../org/bytedeco/pytorch/AnyModuleVector.java | 6 +-
 .../org/bytedeco/pytorch/AnyTupleType.java | 4 +-
 .../org/bytedeco/pytorch/AnyTupleTypePtr.java | 4 +-
 .../java/org/bytedeco/pytorch/AnyType.java | 4 +-
 .../java/org/bytedeco/pytorch/AnyTypePtr.java | 4 +-
 .../java/org/bytedeco/pytorch/AnyValue.java | 61 +
 .../gen/java/org/bytedeco/pytorch/Apply.java | 17 +-
 .../java/org/bytedeco/pytorch/Argument.java | 4 +-
 .../bytedeco/pytorch/ArgumentArrayRef.java | 144 +
 .../org/bytedeco/pytorch/ArgumentDef.java | 55 +
 .../bytedeco/pytorch/ArgumentDefArrayRef.java | 133 +
 .../org/bytedeco/pytorch/ArgumentInfo.java | 4 +-
 .../org/bytedeco/pytorch/ArgumentSpec.java | 4 +-
 .../bytedeco/pytorch/ArgumentSpecCreator.java | 4 +-
 .../pytorch/ArgumentSpecExecutionPlanMap.java | 4 +-
 .../org/bytedeco/pytorch/ArgumentVector.java | 86 -
 .../gen/java/org/bytedeco/pytorch/Assert.java | 10 +-
 .../gen/java/org/bytedeco/pytorch/Assign.java | 19 +-
 .../java/org/bytedeco/pytorch/AssignList.java | 38 +
 .../bytedeco/pytorch/AssignListIterator.java | 35 +
 .../org/bytedeco/pytorch/AssignListMaybe.java | 36 +
 .../java/org/bytedeco/pytorch/Attribute.java | 12 +-
 .../org/bytedeco/pytorch/AttributeList.java | 38 +
 .../pytorch/AttributeListIterator.java | 35 +
 .../org/bytedeco/pytorch/AttributePolicy.java | 6 +-
 .../org/bytedeco/pytorch/AttributeValue.java | 4 +-
 .../java/org/bytedeco/pytorch/AugAssign.java | 10 +-
 .../org/bytedeco/pytorch/AugAssignKind.java | 10 +-
 .../AutoDispatchBelowADInplaceOrView.java | 49 +
 .../pytorch/AutoDispatchBelowAutograd.java | 86 +
 .../AutoDispatchSkipFunctionalize.java | 38 +
 .../org/bytedeco/pytorch/AutoFwGradMode.java | 4 +-
 .../org/bytedeco/pytorch/AutoGradMode.java | 4 +-
 .../pytorch/AutoNonVariableTypeMode.java | 43 +
 .../org/bytedeco/pytorch/AutogradContext.java | 12 +-
 .../org/bytedeco/pytorch/AutogradMeta.java | 4 +-
 .../bytedeco/pytorch/AutogradMetaFactory.java | 4 +-
 .../AutogradMetaFactoryRegisterer.java | 4 +-
 .../pytorch/AutogradMetaInterface.java | 4 +-
 .../org/bytedeco/pytorch/AutogradState.java | 62 +
 .../java/org/bytedeco/pytorch/AvgPool1d.java | 34 -
 .../org/bytedeco/pytorch/AvgPool1dImpl.java | 8 +-
 .../bytedeco/pytorch/AvgPool1dImplBase.java | 8 +-
 .../pytorch/AvgPool1dImplCloneable.java | 14 +-
 .../pytorch/AvgPool1dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/AvgPool1dOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/AvgPool2d.java | 34 -
 .../org/bytedeco/pytorch/AvgPool2dImpl.java | 8 +-
 .../bytedeco/pytorch/AvgPool2dImplBase.java | 8 +-
 .../pytorch/AvgPool2dImplCloneable.java | 14 +-
 .../pytorch/AvgPool2dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/AvgPool2dOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/AvgPool3d.java | 34 -
 .../org/bytedeco/pytorch/AvgPool3dImpl.java | 8 +-
 .../bytedeco/pytorch/AvgPool3dImplBase.java | 8 +-
 .../pytorch/AvgPool3dImplCloneable.java | 14 +-
 .../pytorch/AvgPool3dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/AvgPool3dOptions.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Await.java | 35 +-
 .../java/org/bytedeco/pytorch/AwaitPtr.java | 150 +
 .../pytorch/AwaitSingleElementType.java | 4 +-
 .../java/org/bytedeco/pytorch/AwaitType.java | 4 +-
 .../java/org/bytedeco/pytorch/BCELoss.java | 34 -
 .../org/bytedeco/pytorch/BCELossImpl.java | 8 +-
 .../pytorch/BCELossImplCloneable.java | 14 +-
 .../pytorch/BCELossImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/BCELossOptions.java | 6 +-
 .../bytedeco/pytorch/BCEWithLogitsLoss.java | 34 -
 .../pytorch/BCEWithLogitsLossImpl.java | 8 +-
 .../BCEWithLogitsLossImplCloneable.java | 14 +-
 .../BCEWithLogitsLossImplModuleHolder.java | 79 -
 .../pytorch/BCEWithLogitsLossOptions.java | 6 +-
 .../java/org/bytedeco/pytorch/BFloat16.java | 4 +-
 .../bytedeco/pytorch/BFloat16ArrayRef.java | 15 +-
 .../org/bytedeco/pytorch/BatchNorm1d.java | 34 -
 .../org/bytedeco/pytorch/BatchNorm1dImpl.java | 4 +-
 .../bytedeco/pytorch/BatchNorm1dImplBase.java | 4 +-
 .../pytorch/BatchNorm1dImplBaseBase.java | 4 +-
 .../pytorch/BatchNorm1dImplCloneable.java | 14 +-
 .../pytorch/BatchNorm1dImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/BatchNorm2d.java | 34 -
 .../org/bytedeco/pytorch/BatchNorm2dImpl.java | 4 +-
 .../bytedeco/pytorch/BatchNorm2dImplBase.java | 4 +-
 .../pytorch/BatchNorm2dImplBaseBase.java | 4 +-
 .../pytorch/BatchNorm2dImplCloneable.java | 14 +-
 .../pytorch/BatchNorm2dImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/BatchNorm3d.java | 34 -
 .../org/bytedeco/pytorch/BatchNorm3dImpl.java | 4 +-
 .../bytedeco/pytorch/BatchNorm3dImplBase.java | 4 +-
 .../pytorch/BatchNorm3dImplBaseBase.java | 4 +-
 .../pytorch/BatchNorm3dImplCloneable.java | 14 +-
 .../pytorch/BatchNorm3dImplModuleHolder.java | 79 -
 .../pytorch/BatchNormFuncOptions.java | 4 +-
 .../bytedeco/pytorch/BatchNormOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/BatchSize.java | 4 +-
 .../bytedeco/pytorch/BatchSizeOptional.java | 5 +-
 .../bytedeco/pytorch/BatchSizeSampler.java | 4 +-
 .../java/org/bytedeco/pytorch/Bilinear.java | 34 -
 .../org/bytedeco/pytorch/BilinearImpl.java | 8 +-
 .../pytorch/BilinearImplCloneable.java | 14 +-
 .../pytorch/BilinearImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/BilinearOptions.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/BinOp.java | 10 +-
 .../gen/java/org/bytedeco/pytorch/Blob.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Block.java | 4 +-
 .../org/bytedeco/pytorch/BlockArrayRef.java | 37 +-
 .../org/bytedeco/pytorch/BlockVector.java | 86 -
 .../java/org/bytedeco/pytorch/BlockWrap.java | 4 +-
 .../org/bytedeco/pytorch/Bool2Vector.java | 6 +-
 .../org/bytedeco/pytorch/BoolArrayRef.java | 41 +-
 .../org/bytedeco/pytorch/BoolOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/BoolType.java | 4 +-
 .../org/bytedeco/pytorch/BoolTypePtr.java | 4 +-
 .../java/org/bytedeco/pytorch/BoolVector.java | 6 +-
 .../bytedeco/pytorch/BoolVectorOptional.java | 5 +-
 .../pytorch/BooleanElementReference.java | 42 +
 .../org/bytedeco/pytorch/BooleanList.java | 239 +
 .../bytedeco/pytorch/BooleanListIterator.java | 84 +
 .../gen/java/org/bytedeco/pytorch/Break.java | 10 +-
 .../org/bytedeco/pytorch/BufferPolicy.java | 6 +-
 .../org/bytedeco/pytorch/BuiltinFunction.java | 11 +-
 .../org/bytedeco/pytorch/BuiltinModule.java | 8 +-
 .../org/bytedeco/pytorch/ByteArrayRef.java | 32 +-
 .../org/bytedeco/pytorch/ByteOptional.java | 5 +-
 .../bytedeco/pytorch/BytePointerVector.java | 6 +-
 .../bytedeco/pytorch/BytecodeEmitMode.java | 45 -
 .../pytorch/BytecodeEmitModeGuard.java | 46 -
 .../org/bytedeco/pytorch/C10FlagParser.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/CELU.java | 34 -
 .../java/org/bytedeco/pytorch/CELUImpl.java | 8 +-
 .../bytedeco/pytorch/CELUImplCloneable.java | 14 +-
 .../pytorch/CELUImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/CELUOptions.java | 4 +-
 .../bytedeco/pytorch/CPUGeneratorImpl.java | 49 +
 .../java/org/bytedeco/pytorch/CTCLoss.java | 34 -
 .../org/bytedeco/pytorch/CTCLossImpl.java | 8 +-
 .../pytorch/CTCLossImplCloneable.java | 14 +-
 .../pytorch/CTCLossImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/CTCLossOptions.java | 6 +-
 .../org/bytedeco/pytorch/CUDAHooksArgs.java | 29 +
 .../bytedeco/pytorch/CUDAHooksInterface.java | 120 +
 .../pytorch/{_object.java => CUevent_st.java} | 10 +-
 .../gen/java/org/bytedeco/pytorch/Call.java | 8 +-
 .../java/org/bytedeco/pytorch/Capsule.java | 32 -
 .../org/bytedeco/pytorch/CapsuleType.java | 4 +-
 .../org/bytedeco/pytorch/CapsuleTypePtr.java | 4 +-
 .../java/org/bytedeco/pytorch/CastValue.java | 11 +-
 .../bytedeco/pytorch/ChunkBatchDataset.java | 4 +-
 .../pytorch/ChunkBatchSharedBatchDataset.java | 4 +-
 .../org/bytedeco/pytorch/ChunkDataReader.java | 4 +-
 .../org/bytedeco/pytorch/ChunkDataset.java | 4 +-
 .../bytedeco/pytorch/ChunkDatasetOptions.java | 4 +-
 .../pytorch/ChunkMapBatchDataset.java | 4 +-
 .../org/bytedeco/pytorch/ChunkMapDataset.java | 4 +-
 .../pytorch/ChunkRandomDataLoader.java | 4 +-
 .../pytorch/ChunkRandomDataLoaderBase.java | 5 +-
 .../pytorch/ChunkSharedBatchDataset.java | 4 +-
 .../pytorch/ChunkStatefulDataset.java | 4 +-
 .../org/bytedeco/pytorch/ClassAttribute.java | 4 +-
 .../java/org/bytedeco/pytorch/ClassDef.java | 17 +-
 .../java/org/bytedeco/pytorch/ClassType.java | 10 +-
 .../pytorch/ClassTypePropertyOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/ClassValue.java | 21 +-
 .../org/bytedeco/pytorch/ClosureValue.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Code.java | 20 +-
 .../java/org/bytedeco/pytorch/CodeImpl.java | 4 +-
 .../org/bytedeco/pytorch/CompilationUnit.java | 32 +-
 .../pytorch/CompilationUnitVector.java | 6 +-
 .../pytorch/CompileTimeEmptyString.java | 7 +-
 .../pytorch/CompleteArgumentInfo.java | 37 -
 .../pytorch/CompleteArgumentInfoPOD.java | 58 -
 .../pytorch/CompleteArgumentSpec.java | 36 -
 .../org/bytedeco/pytorch/ComplexHolder.java | 39 -
 .../org/bytedeco/pytorch/ComplexType.java | 6 +-
 .../org/bytedeco/pytorch/ComplexTypePtr.java | 4 +-
 .../java/org/bytedeco/pytorch/Compound.java | 14 +-
 .../java/org/bytedeco/pytorch/ConstExpr.java | 11 +-
 .../org/bytedeco/pytorch/ConstantPad1d.java | 34 -
 .../bytedeco/pytorch/ConstantPad1dImpl.java | 8 +-
 .../pytorch/ConstantPad1dImplBase.java | 8 +-
 .../pytorch/ConstantPad1dImplCloneable.java | 14 +-
 .../ConstantPad1dImplModuleHolder.java | 79 -
 .../pytorch/ConstantPad1dOptions.java | 4 +-
 .../org/bytedeco/pytorch/ConstantPad2d.java | 34 -
 .../bytedeco/pytorch/ConstantPad2dImpl.java | 8 +-
 .../pytorch/ConstantPad2dImplBase.java | 8 +-
 .../pytorch/ConstantPad2dImplCloneable.java | 14 +-
 .../ConstantPad2dImplModuleHolder.java | 79 -
 .../pytorch/ConstantPad2dOptions.java | 4 +-
 .../org/bytedeco/pytorch/ConstantPad3d.java | 34 -
 .../bytedeco/pytorch/ConstantPad3dImpl.java | 8 +-
 .../pytorch/ConstantPad3dImplBase.java | 8 +-
 .../pytorch/ConstantPad3dImplCloneable.java | 14 +-
 .../ConstantPad3dImplModuleHolder.java | 79 -
 .../pytorch/ConstantPad3dOptions.java | 4 +-
 .../org/bytedeco/pytorch/ConstantString.java | 29 +-
 .../bytedeco/pytorch/ConstantStringPtr.java | 150 +
 .../java/org/bytedeco/pytorch/Context.java | 4 +-
 .../java/org/bytedeco/pytorch/Continue.java | 10 +-
 .../gen/java/org/bytedeco/pytorch/Conv1d.java | 34 -
 .../bytedeco/pytorch/Conv1dFuncOptions.java | 6 +-
 .../java/org/bytedeco/pytorch/Conv1dImpl.java | 8 +-
 .../org/bytedeco/pytorch/Conv1dImplBase.java | 6 +-
 .../bytedeco/pytorch/Conv1dImplCloneable.java | 14 +-
 .../pytorch/Conv1dImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/Conv1dOptions.java | 8 +-
 ...onv_padding_t1.java => Conv1dPadding.java} | 20 +-
 .../gen/java/org/bytedeco/pytorch/Conv2d.java | 34 -
 .../bytedeco/pytorch/Conv2dFuncOptions.java | 6 +-
 .../java/org/bytedeco/pytorch/Conv2dImpl.java | 8 +-
 .../org/bytedeco/pytorch/Conv2dImplBase.java | 6 +-
 .../bytedeco/pytorch/Conv2dImplCloneable.java | 14 +-
 .../pytorch/Conv2dImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/Conv2dOptions.java | 8 +-
 ...onv_padding_t2.java => Conv2dPadding.java} | 20 +-
 .../gen/java/org/bytedeco/pytorch/Conv3d.java | 34 -
 .../bytedeco/pytorch/Conv3dFuncOptions.java | 6 +-
 .../java/org/bytedeco/pytorch/Conv3dImpl.java | 8 +-
 .../org/bytedeco/pytorch/Conv3dImplBase.java | 6 +-
 .../bytedeco/pytorch/Conv3dImplCloneable.java | 14 +-
 .../pytorch/Conv3dImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/Conv3dOptions.java | 8 +-
 ...onv_padding_t3.java => Conv3dPadding.java} | 20 +-
 ...dding_mode_t.java => ConvPaddingMode.java} | 36 +-
 .../org/bytedeco/pytorch/ConvTranspose1d.java | 34 -
 .../pytorch/ConvTranspose1dFuncOptions.java | 4 +-
 .../bytedeco/pytorch/ConvTranspose1dImpl.java | 8 +-
 .../pytorch/ConvTranspose1dImplBase.java | 8 +-
 .../pytorch/ConvTranspose1dImplBaseBase.java | 4 +-
 .../pytorch/ConvTranspose1dImplCloneable.java | 14 +-
 .../ConvTranspose1dImplModuleHolder.java | 79 -
 .../pytorch/ConvTranspose1dOptions.java | 6 +-
 .../org/bytedeco/pytorch/ConvTranspose2d.java | 34 -
 .../pytorch/ConvTranspose2dFuncOptions.java | 4 +-
 .../bytedeco/pytorch/ConvTranspose2dImpl.java | 8 +-
 .../pytorch/ConvTranspose2dImplBase.java | 8 +-
 .../pytorch/ConvTranspose2dImplBaseBase.java | 4 +-
 .../pytorch/ConvTranspose2dImplCloneable.java | 14 +-
 .../ConvTranspose2dImplModuleHolder.java | 79 -
 .../pytorch/ConvTranspose2dOptions.java | 6 +-
 .../org/bytedeco/pytorch/ConvTranspose3d.java | 34 -
 .../pytorch/ConvTranspose3dFuncOptions.java | 4 +-
 .../bytedeco/pytorch/ConvTranspose3dImpl.java | 8 +-
 .../pytorch/ConvTranspose3dImplBase.java | 8 +-
 .../pytorch/ConvTranspose3dImplBaseBase.java | 4 +-
 .../pytorch/ConvTranspose3dImplCloneable.java | 14 +-
 .../ConvTranspose3dImplModuleHolder.java | 79 -
 .../pytorch/ConvTranspose3dOptions.java | 6 +-
 .../bytedeco/pytorch/CopyBytesFunction.java | 4 +-
 .../bytedeco/pytorch/CosineEmbeddingLoss.java | 34 -
 .../pytorch/CosineEmbeddingLossImpl.java | 8 +-
 .../CosineEmbeddingLossImplCloneable.java | 14 +-
 .../CosineEmbeddingLossImplModuleHolder.java | 79 -
 .../pytorch/CosineEmbeddingLossOptions.java | 6 +-
 .../bytedeco/pytorch/CosineSimilarity.java | 34 -
 .../pytorch/CosineSimilarityImpl.java | 8 +-
 .../CosineSimilarityImplCloneable.java | 14 +-
 .../CosineSimilarityImplModuleHolder.java | 79 -
 .../pytorch/CosineSimilarityOptions.java | 4 +-
 .../org/bytedeco/pytorch/CppFunction.java | 104 +
 .../org/bytedeco/pytorch/CppSignature.java | 7 +-
 .../pytorch/CppSignatureOptional.java | 5 +-
 .../bytedeco/pytorch/CrossEntropyLoss.java | 34 -
 .../pytorch/CrossEntropyLossImpl.java | 8 +-
 .../CrossEntropyLossImplCloneable.java | 14 +-
 .../CrossEntropyLossImplModuleHolder.java | 79 -
 .../pytorch/CrossEntropyLossOptions.java | 6 +-
 .../org/bytedeco/pytorch/CrossMapLRN2d.java | 34 -
 .../bytedeco/pytorch/CrossMapLRN2dImpl.java | 8 +-
 .../pytorch/CrossMapLRN2dImplCloneable.java | 14 +-
 .../CrossMapLRN2dImplModuleHolder.java | 79 -
 .../pytorch/CrossMapLRN2dOptions.java | 4 +-
 .../bytedeco/pytorch/CustomBatchRequest.java | 4 +-
 .../bytedeco/pytorch/CustomClassHolder.java | 4 +-
 .../org/bytedeco/pytorch/DDPLoggingData.java | 8 +-
 .../bytedeco/pytorch/DataLoaderOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/DataPtr.java | 14 +-
 .../org/bytedeco/pytorch/DebugInfoBase.java | 4 +-
 .../org/bytedeco/pytorch/DebugInfoGuard.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Decl.java | 15 +-
 .../gen/java/org/bytedeco/pytorch/Def.java | 16 +-
 .../java/org/bytedeco/pytorch/DefMaybe.java | 10 +-
 .../java/org/bytedeco/pytorch/DefVector.java | 6 +-
 .../gen/java/org/bytedeco/pytorch/Delete.java | 12 +-
 .../{warn_fn_type.java => DeleterFnPtr.java} | 12 +-
 .../DeserializationStorageContext.java | 4 +-
 .../bytedeco/pytorch/DetailConv1dOptions.java | 8 +-
 .../bytedeco/pytorch/DetailConv2dOptions.java | 8 +-
 .../bytedeco/pytorch/DetailConv3dOptions.java | 8 +-
 .../bytedeco/pytorch/DetectAnomalyGuard.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Device.java | 8 +-
 .../org/bytedeco/pytorch/DeviceGuard.java | 85 -
 .../pytorch/DeviceGuardImplInterface.java | 180 +
 .../pytorch/DeviceGuardImplRegistrar.java | 39 +
 .../java/org/bytedeco/pytorch/DeviceHash.java | 37 -
 .../org/bytedeco/pytorch/DeviceObjType.java | 4 +-
 .../bytedeco/pytorch/DeviceObjTypePtr.java | 4 +-
 .../org/bytedeco/pytorch/DeviceOptional.java | 5 +-
 .../org/bytedeco/pytorch/DeviceTypeHash.java | 37 -
 .../java/org/bytedeco/pytorch/DictComp.java | 10 +-
 .../org/bytedeco/pytorch/DictKeyEqualTo.java | 38 -
 .../org/bytedeco/pytorch/DictKeyHash.java | 38 -
 .../org/bytedeco/pytorch/DictLiteral.java | 16 +-
 .../java/org/bytedeco/pytorch/DictType.java | 4 +-
 .../pytorch/DifferentiableViewMeta.java | 4 +-
 .../java/org/bytedeco/pytorch/DimVector.java | 23 +-
 .../DimVectorInferExpandGeometryResult.java | 8 +-
 .../bytedeco/pytorch/DimVectorOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/Dimname.java | 4 +-
 .../org/bytedeco/pytorch/DimnameArrayRef.java | 17 +-
 .../bytedeco/pytorch/DimnameListOptional.java | 5 +-
 .../org/bytedeco/pytorch/DimnameOptional.java | 5 +-
 .../org/bytedeco/pytorch/DimnameVector.java | 6 +-
 .../pytorch/DisablePythonDispatcher.java | 39 +
 .../pytorch/DisableRecordFunctionGuard.java | 4 +-
 .../org/bytedeco/pytorch/DisabledStr.java | 38 +
 .../pytorch/DispatchKeyExtractor.java | 4 +-
 .../bytedeco/pytorch/DispatchKeyOptional.java | 5 +-
 .../org/bytedeco/pytorch/DispatchKeySet.java | 12 +-
 .../pytorch/DispatchTraceNestingGuard.java | 36 -
 .../java/org/bytedeco/pytorch/Dispatcher.java | 4 +-
 .../bytedeco/pytorch/DistBackendError.java | 4 +-
 .../pytorch/DistributedRandomSampler.java | 4 +-
 .../bytedeco/pytorch/DistributedSampler.java | 4 +-
 .../pytorch/DistributedSequentialSampler.java | 4 +-
 ...{any_of.java => DontIncreaseRefcount.java} | 13 +-
 .../gen/java/org/bytedeco/pytorch/Dots.java | 10 +-
 .../org/bytedeco/pytorch/DoubleArrayRef.java | 26 +-
 .../pytorch/DoubleArrayRefOptional.java | 5 +-
 .../org/bytedeco/pytorch/DoubleComplex.java | 197 +
 ...rayRef.java => DoubleComplexArrayRef.java} | 48 +-
 .../DoubleComplexElementReference.java | 42 +
 .../bytedeco/pytorch/DoubleComplexList.java | 256 +
 .../pytorch/DoubleComplexListIterator.java | 87 +
 .../pytorch/DoubleElementReference.java | 42 +
 .../pytorch/DoubleExpandingArrayOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/DoubleList.java | 239 +
 .../bytedeco/pytorch/DoubleListIterator.java | 84 +
 .../org/bytedeco/pytorch/DoubleOptional.java | 5 +-
 .../org/bytedeco/pytorch/DoubleVector.java | 6 +-
 .../pytorch/DoubleVectorOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/Dropout.java | 34 -
 .../java/org/bytedeco/pytorch/Dropout2d.java | 34 -
 .../org/bytedeco/pytorch/Dropout2dImpl.java | 10 +-
 .../bytedeco/pytorch/Dropout2dImplBase.java | 10 +-
 .../pytorch/Dropout2dImplCloneable.java | 14 +-
 .../pytorch/Dropout2dImplModuleHolder.java | 79 -
 .../java/org/bytedeco/pytorch/Dropout3d.java | 34 -
 .../org/bytedeco/pytorch/Dropout3dImpl.java | 10 +-
 .../bytedeco/pytorch/Dropout3dImplBase.java | 10 +-
 .../pytorch/Dropout3dImplCloneable.java | 14 +-
 .../pytorch/Dropout3dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/DropoutFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/DropoutImpl.java | 10 +-
 .../org/bytedeco/pytorch/DropoutImplBase.java | 10 +-
 .../pytorch/DropoutImplCloneable.java | 14 +-
 .../pytorch/DropoutImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/DropoutOptions.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/ELU.java | 34 -
 .../java/org/bytedeco/pytorch/ELUImpl.java | 8 +-
 .../bytedeco/pytorch/ELUImplCloneable.java | 14 +-
 .../bytedeco/pytorch/ELUImplModuleHolder.java | 79 -
 .../java/org/bytedeco/pytorch/ELUOptions.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Edge.java | 4 +-
 .../java/org/bytedeco/pytorch/EdgeVector.java | 6 +-
 .../bytedeco/pytorch/EllipsisIndexType.java | 4 +-
 .../java/org/bytedeco/pytorch/Embedding.java | 42 -
 .../org/bytedeco/pytorch/EmbeddingBag.java | 42 -
 .../EmbeddingBagFromPretrainedOptions.java | 4 +-
 .../pytorch/EmbeddingBagFuncOptions.java | 4 +-
 .../bytedeco/pytorch/EmbeddingBagImpl.java | 12 +-
 .../pytorch/EmbeddingBagImplCloneable.java | 14 +-
 .../pytorch/EmbeddingBagImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/EmbeddingBagMode.java | 4 +-
 .../bytedeco/pytorch/EmbeddingBagOptions.java | 4 +-
 .../EmbeddingFromPretrainedOptions.java | 4 +-
 .../pytorch/EmbeddingFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/EmbeddingImpl.java | 8 +-
 .../pytorch/EmbeddingImplCloneable.java | 14 +-
 .../pytorch/EmbeddingImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/EmbeddingOptions.java | 4 +-
 .../pytorch/EnableProfilingGuard.java | 4 +-
 .../java/org/bytedeco/pytorch/EnabledStr.java | 31 +
 .../bytedeco/pytorch/EnforceFiniteError.java | 4 +-
 .../java/org/bytedeco/pytorch/EnumHolder.java | 31 +-
 .../org/bytedeco/pytorch/EnumHolderPtr.java | 150 +
 .../org/bytedeco/pytorch/EnumNameValue.java | 4 +-
 .../pytorch/EnumNameValueArrayRef.java | 15 +-
 .../java/org/bytedeco/pytorch/EnumType.java | 8 +-
 .../java/org/bytedeco/pytorch/EqualType.java | 39 -
 .../gen/java/org/bytedeco/pytorch/Error.java | 4 +-
 .../org/bytedeco/pytorch/ErrorReport.java | 10 +-
 .../java/org/bytedeco/pytorch/Example.java | 4 +-
 .../bytedeco/pytorch/ExampleCollation.java | 4 +-
 .../org/bytedeco/pytorch/ExampleIterator.java | 4 +-
 .../org/bytedeco/pytorch/ExampleOptional.java | 5 +-
 .../org/bytedeco/pytorch/ExampleStack.java | 4 +-
 .../org/bytedeco/pytorch/ExampleVector.java | 6 +-
 .../pytorch/ExampleVectorIterator.java | 44 -
 .../pytorch/ExampleVectorOptional.java | 5 +-
 .../ExampleVectorOptionalIterator.java | 4 +-
 .../pytorch/ExceptionMessageValue.java | 4 +-
 .../org/bytedeco/pytorch/ExceptionValue.java | 11 +-
 .../org/bytedeco/pytorch/ExecutionPlan.java | 14 +-
 .../ExecutorExecutionModeOptional.java | 5 +-
 .../bytedeco/pytorch/ExperimentalConfig.java | 71 +
 .../gen/java/org/bytedeco/pytorch/Expr.java | 10 +-
 .../java/org/bytedeco/pytorch/ExprList.java | 38 +
 .../bytedeco/pytorch/ExprListIterator.java | 35 +
 .../java/org/bytedeco/pytorch/ExprMaybe.java | 10 +-
 .../java/org/bytedeco/pytorch/ExprStmt.java | 10 +-
 .../org/bytedeco/pytorch/ExtraFilesMap.java | 4 +-
 .../org/bytedeco/pytorch/FanModeType.java | 4 +-
 .../bytedeco/pytorch/FeatureAlphaDropout.java | 34 -
 .../FeatureAlphaDropoutFuncOptions.java | 4 +-
 .../pytorch/FeatureAlphaDropoutImpl.java | 10 +-
 .../pytorch/FeatureAlphaDropoutImplBase.java | 10 +-
 .../FeatureAlphaDropoutImplCloneable.java | 14 +-
 .../FeatureAlphaDropoutImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/FileLineFunc.java | 42 +
 .../java/org/bytedeco/pytorch/Flatten.java | 34 -
 .../org/bytedeco/pytorch/FlattenImpl.java | 8 +-
 .../pytorch/FlattenImplCloneable.java | 14 +-
 .../pytorch/FlattenImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/FlattenOptions.java | 4 +-
 .../org/bytedeco/pytorch/FloatArrayRef.java | 24 +-
 .../org/bytedeco/pytorch/FloatComplex.java | 73 +
 ...rrayRef.java => FloatComplexArrayRef.java} | 48 +-
 .../org/bytedeco/pytorch/FloatOptional.java | 35 +
 .../java/org/bytedeco/pytorch/FloatType.java | 6 +-
 .../org/bytedeco/pytorch/FloatTypePtr.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Fold.java | 34 -
 .../java/org/bytedeco/pytorch/FoldImpl.java | 8 +-
 .../bytedeco/pytorch/FoldImplCloneable.java | 14 +-
 .../pytorch/FoldImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/FoldOptions.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/For.java | 18 +-
 ...eGuard.java => ForceDispatchKeyGuard.java} | 13 +-
 .../org/bytedeco/pytorch/ForwardADLevel.java | 4 +-
 .../org/bytedeco/pytorch/ForwardGrad.java | 4 +-
 .../pytorch/FractionalMaxPool1dOptions.java | 4 +-
 .../bytedeco/pytorch/FractionalMaxPool2d.java | 34 -
 .../pytorch/FractionalMaxPool2dImpl.java | 10 +-
 .../FractionalMaxPool2dImplCloneable.java | 14 +-
 .../FractionalMaxPool2dImplModuleHolder.java | 79 -
 .../pytorch/FractionalMaxPool2dOptions.java | 4 +-
 .../bytedeco/pytorch/FractionalMaxPool3d.java | 34 -
 .../pytorch/FractionalMaxPool3dImpl.java | 10 +-
 .../FractionalMaxPool3dImplCloneable.java | 14 +-
 .../FractionalMaxPool3dImplModuleHolder.java | 79 -
 .../pytorch/FractionalMaxPool3dOptions.java | 4 +-
 .../pytorch/FullDataLoaderOptions.java | 40 -
 .../bytedeco/pytorch/FuncTorchTLSBase.java | 50 +
 .../java/org/bytedeco/pytorch/Function.java | 10 +-
 .../pytorch/FunctionCrossMapLRN2d.java | 98 +
 .../bytedeco/pytorch/FunctionPostHook.java | 10 +-
 .../pytorch/FunctionPostHookVector.java | 6 +-
 .../org/bytedeco/pytorch/FunctionPreHook.java | 6 +-
 .../pytorch/FunctionPreHookVector.java | 6 +-
 .../org/bytedeco/pytorch/FunctionSchema.java | 85 +-
 .../pytorch/FunctionSchemaOptional.java | 5 +-
 .../pytorch/FunctionSchemaVector.java | 47 +
 .../org/bytedeco/pytorch/FunctionType.java | 4 +-
 .../org/bytedeco/pytorch/FunctionValue.java | 13 +-
 .../org/bytedeco/pytorch/FunctionVector.java | 6 +-
 .../pytorch/FunctionalityOffsetAndMask.java | 4 +-
 .../org/bytedeco/pytorch/FusionStrategy.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Future.java | 121 +-
 .../java/org/bytedeco/pytorch/FuturePtr.java | 150 +
 .../bytedeco/pytorch/FuturePtrArrayRef.java | 133 +
 .../pytorch/FuturePtrElementReference.java | 42 +
 .../org/bytedeco/pytorch/FuturePtrList.java | 239 +
 .../pytorch/FuturePtrListIterator.java | 84 +
 .../pytorch/FutureSingleElementType.java | 4 +-
 .../java/org/bytedeco/pytorch/FutureType.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/GELU.java | 33 -
 .../java/org/bytedeco/pytorch/GELUImpl.java | 8 +-
 .../bytedeco/pytorch/GELUImplCloneable.java | 14 +-
 .../pytorch/GELUImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/GELUOptions.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/GLU.java | 34 -
 .../java/org/bytedeco/pytorch/GLUImpl.java | 8 +-
 .../bytedeco/pytorch/GLUImplCloneable.java | 14 +-
 .../bytedeco/pytorch/GLUImplModuleHolder.java | 79 -
 .../java/org/bytedeco/pytorch/GLUOptions.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/GRU.java | 34 -
 .../java/org/bytedeco/pytorch/GRUCell.java | 34 -
 .../org/bytedeco/pytorch/GRUCellImpl.java | 10 +-
 .../org/bytedeco/pytorch/GRUCellImplBase.java | 6 +-
 .../pytorch/GRUCellImplCloneable.java | 14 +-
 .../pytorch/GRUCellImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/GRUCellOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/GRUImpl.java | 18 +-
 .../org/bytedeco/pytorch/GRUImplBase.java | 8 +-
 .../bytedeco/pytorch/GRUImplCloneable.java | 14 +-
 .../bytedeco/pytorch/GRUImplModuleHolder.java | 79 -
 .../java/org/bytedeco/pytorch/GRUOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/Generator.java | 15 +-
 .../org/bytedeco/pytorch/GeneratorImpl.java | 51 +
 .../bytedeco/pytorch/GeneratorImplPtr.java | 150 +
 .../bytedeco/pytorch/GeneratorOptional.java | 5 +-
 .../org/bytedeco/pytorch/GeneratorType.java | 4 +-
 .../bytedeco/pytorch/GeneratorTypePtr.java | 4 +-
 .../org/bytedeco/pytorch/GenericDict.java | 23 +-
 .../bytedeco/pytorch/GenericDictEntryRef.java | 4 +-
 .../bytedeco/pytorch/GenericDictIterator.java | 10 +-
 .../pytorch/GenericElementReference.java | 42 +
 .../org/bytedeco/pytorch/GenericList.java | 232 +
 .../bytedeco/pytorch/GenericListIterator.java | 84 +
 .../gen/java/org/bytedeco/pytorch/Global.java | 12 +-
 .../java/org/bytedeco/pytorch/GradMode.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Graph.java | 15 +-
 .../java/org/bytedeco/pytorch/GraphAttr.java | 10 +-
 .../org/bytedeco/pytorch/GraphExecutor.java | 25 +-
 .../pytorch/GraphExecutorImplBase.java | 4 +-
 .../bytedeco/pytorch/GraphExecutorState.java | 4 +-
 .../org/bytedeco/pytorch/GraphFunction.java | 27 +-
 .../pytorch/GraphOptimizerEnabledGuard.java | 4 +-
 .../org/bytedeco/pytorch/GraphVector.java | 12 +-
 .../java/org/bytedeco/pytorch/GraphsAttr.java | 4 +-
 .../pytorch/GridSampleFuncOptions.java | 8 +-
 ...sample_mode_t.java => GridSampleMode.java} | 24 +-
 ...mode_t.java => GridSamplePaddingMode.java} | 30 +-
 .../java/org/bytedeco/pytorch/GroupNorm.java | 34 -
 .../pytorch/GroupNormFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/GroupNormImpl.java | 8 +-
 .../pytorch/GroupNormImplCloneable.java | 14 +-
 .../pytorch/GroupNormImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/GroupNormOptions.java | 4 +-
 .../pytorch/GumbelSoftmaxFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/HIPHooksArgs.java | 29 +
 .../bytedeco/pytorch/HIPHooksInterface.java | 60 +
 .../gen/java/org/bytedeco/pytorch/Half.java | 4 +-
 .../org/bytedeco/pytorch/HalfArrayRef.java | 15 +-
 .../org/bytedeco/pytorch/HalfComplex.java | 61 +
 .../java/org/bytedeco/pytorch/Hardshrink.java | 34 -
 .../org/bytedeco/pytorch/HardshrinkImpl.java | 8 +-
 .../pytorch/HardshrinkImplCloneable.java | 14 +-
 .../pytorch/HardshrinkImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/HardshrinkOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/Hardtanh.java | 34 -
 .../org/bytedeco/pytorch/HardtanhImpl.java | 8 +-
 .../pytorch/HardtanhImplCloneable.java | 14 +-
 .../pytorch/HardtanhImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/HardtanhOptions.java | 4 +-
 .../pytorch/HashAliasedIValueMap.java | 4 +-
 .../bytedeco/pytorch/HashAliasedIValues.java | 5 +-
 .../java/org/bytedeco/pytorch/HashType.java | 39 -
 .../bytedeco/pytorch/HermeticPyObjectTLS.java | 48 +
 .../bytedeco/pytorch/HingeEmbeddingLoss.java | 34 -
 .../pytorch/HingeEmbeddingLossImpl.java | 8 +-
 .../HingeEmbeddingLossImplCloneable.java | 14 +-
 .../HingeEmbeddingLossImplModuleHolder.java | 79 -
 .../pytorch/HingeEmbeddingLossOptions.java | 6 +-
 .../java/org/bytedeco/pytorch/HuberLoss.java | 34 -
 .../org/bytedeco/pytorch/HuberLossImpl.java | 8 +-
 .../pytorch/HuberLossImplCloneable.java | 14 +-
 .../pytorch/HuberLossImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/HuberLossOptions.java | 6 +-
 .../java/org/bytedeco/pytorch/IMethod.java | 4 +-
 .../bytedeco/pytorch/IRAttributeError.java | 28 -
 .../org/bytedeco/pytorch/IStreamAdapter.java | 34 +
 .../gen/java/org/bytedeco/pytorch/IValue.java | 116 +-
 .../org/bytedeco/pytorch/IValueArrayRef.java | 23 +-
 .../org/bytedeco/pytorch/IValueOptional.java | 5 +-
 .../pytorch/IValueOptionalVector.java | 6 +-
 .../org/bytedeco/pytorch/IValueVector.java | 6 +-
 .../gen/java/org/bytedeco/pytorch/Ident.java | 10 +-
 .../java/org/bytedeco/pytorch/IdentList.java | 38 +
 .../bytedeco/pytorch/IdentListIterator.java | 35 +
 .../java/org/bytedeco/pytorch/Identity.java | 33 -
 .../org/bytedeco/pytorch/IdentityImpl.java | 4 +-
 .../pytorch/IdentityImplCloneable.java | 14 +-
 .../pytorch/IdentityImplModuleHolder.java | 79 -
 .../src/gen/java/org/bytedeco/pytorch/If.java | 20 +-
 .../pytorch/IncludeDispatchKeyGuard.java | 39 +
 .../java/org/bytedeco/pytorch/IndexError.java | 4 +-
 .../java/org/bytedeco/pytorch/Indices.java | 30 -
 .../org/bytedeco/pytorch/InferenceMode.java | 4 +-
 .../org/bytedeco/pytorch/InferredType.java | 4 +-
 .../bytedeco/pytorch/InlinedCallStack.java | 6 +-
 .../pytorch/InlinedCallStackOptional.java | 5 +-
 .../org/bytedeco/pytorch/InputArchive.java | 12 +-
 .../org/bytedeco/pytorch/InputMetadata.java | 79 -
 .../org/bytedeco/pytorch/InstanceNorm1d.java | 34 -
 .../bytedeco/pytorch/InstanceNorm1dImpl.java | 4 +-
 .../pytorch/InstanceNorm1dImplBase.java | 4 +-
 .../pytorch/InstanceNorm1dImplBaseBase.java | 4 +-
 .../pytorch/InstanceNorm1dImplCloneable.java | 14 +-
 .../InstanceNorm1dImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/InstanceNorm2d.java | 34 -
 .../bytedeco/pytorch/InstanceNorm2dImpl.java | 4 +-
 .../pytorch/InstanceNorm2dImplBase.java | 4 +-
 .../pytorch/InstanceNorm2dImplBaseBase.java | 4 +-
 .../pytorch/InstanceNorm2dImplCloneable.java | 14 +-
 .../InstanceNorm2dImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/InstanceNorm3d.java | 34 -
 .../bytedeco/pytorch/InstanceNorm3dImpl.java | 4 +-
 .../pytorch/InstanceNorm3dImplBase.java | 4 +-
 .../pytorch/InstanceNorm3dImplBaseBase.java | 4 +-
 .../pytorch/InstanceNorm3dImplCloneable.java | 14 +-
 .../InstanceNorm3dImplModuleHolder.java | 79 -
 .../pytorch/InstanceNormFuncOptions.java | 4 +-
 .../bytedeco/pytorch/InstanceNormOptions.java | 4 +-
 .../org/bytedeco/pytorch/Instruction.java | 16 +-
 .../bytedeco/pytorch/InstructionVector.java | 6 +-
 .../org/bytedeco/pytorch/IntArrayRef.java | 30 +-
 .../org/bytedeco/pytorch/IntOptional.java | 5 +-
 .../pytorch/IntSizedSmallVectorBase.java | 54 +
 .../java/org/bytedeco/pytorch/IntType.java | 6 +-
 .../java/org/bytedeco/pytorch/IntTypePtr.java | 4 +-
 .../org/bytedeco/pytorch/InterfaceType.java | 4 +-
 .../pytorch/InterpolateFuncOptions.java | 6 +-
 ...olate_mode_t.java => InterpolateMode.java} | 54 +-
 .../pytorch/InterpreterContinuation.java | 47 -
 .../bytedeco/pytorch/InterpreterState.java | 36 -
 .../pytorch/InterpreterStateImpl.java | 4 +-
 .../org/bytedeco/pytorch/IterableTree.java | 87 -
 .../java/org/bytedeco/pytorch/JitModule.java | 10 +-
 .../java/org/bytedeco/pytorch/JitNode.java | 12 +-
 .../org/bytedeco/pytorch/JitNodeVector.java | 6 +-
 .../org/bytedeco/pytorch/JitNodeWrap.java | 4 +-
 .../java/org/bytedeco/pytorch/JitObject.java | 20 +-
 .../java/org/bytedeco/pytorch/JitString.java | 4 +-
 .../java/org/bytedeco/pytorch/KLDivLoss.java | 34 -
 .../org/bytedeco/pytorch/KLDivLossImpl.java | 8 +-
 .../pytorch/KLDivLossImplCloneable.java | 14 +-
 .../pytorch/KLDivLossImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/KLDivLossOptions.java | 6 +-
 ...duction_t.java => KLDivLossReduction.java} | 36 +-
 .../org/bytedeco/pytorch/KernelFunction.java | 6 +-
 .../gen/java/org/bytedeco/pytorch/L1Loss.java | 34 -
 .../java/org/bytedeco/pytorch/L1LossImpl.java | 8 +-
 .../bytedeco/pytorch/L1LossImplCloneable.java | 14 +-
 .../pytorch/L1LossImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/L1LossOptions.java | 6 +-
 .../gen/java/org/bytedeco/pytorch/LBFGS.java | 12 +-
 .../org/bytedeco/pytorch/LBFGSOptions.java | 9 +-
 .../org/bytedeco/pytorch/LBFGSParamState.java | 9 +-
 .../java/org/bytedeco/pytorch/LPPool1d.java | 34 -
 .../org/bytedeco/pytorch/LPPool1dImpl.java | 8 +-
 .../bytedeco/pytorch/LPPool1dImplBase.java | 8 +-
 .../pytorch/LPPool1dImplCloneable.java | 14 +-
 .../pytorch/LPPool1dImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/LPPool1dOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/LPPool2d.java | 34 -
 .../org/bytedeco/pytorch/LPPool2dImpl.java | 8 +-
 .../bytedeco/pytorch/LPPool2dImplBase.java | 8 +-
 .../pytorch/LPPool2dImplCloneable.java | 14 +-
 .../pytorch/LPPool2dImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/LPPool2dOptions.java | 4 +-
 .../org/bytedeco/pytorch/LPPool3dOptions.java | 4 +-
 .../org/bytedeco/pytorch/LRScheduler.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/LSTM.java | 34 -
 .../java/org/bytedeco/pytorch/LSTMCell.java | 34 -
 .../org/bytedeco/pytorch/LSTMCellImpl.java | 14 +-
 .../bytedeco/pytorch/LSTMCellImplBase.java | 6 +-
 .../pytorch/LSTMCellImplCloneable.java | 14 +-
 .../pytorch/LSTMCellImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/LSTMCellOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/LSTMImpl.java | 20 +-
 .../org/bytedeco/pytorch/LSTMImplBase.java | 8 +-
 .../bytedeco/pytorch/LSTMImplCloneable.java | 14 +-
 .../pytorch/LSTMImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/LSTMOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/LayerNorm.java | 34 -
 .../pytorch/LayerNormFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/LayerNormImpl.java | 8 +-
 .../pytorch/LayerNormImplCloneable.java | 14 +-
 .../pytorch/LayerNormImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/LayerNormOptions.java | 4 +-
 .../pytorch/LayoutEnumerationType.java | 4 +-
 .../org/bytedeco/pytorch/LayoutOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/LayoutType.java | 4 +-
 .../org/bytedeco/pytorch/LayoutTypePtr.java | 4 +-
 .../java/org/bytedeco/pytorch/LeakyReLU.java | 34 -
 .../org/bytedeco/pytorch/LeakyReLUImpl.java | 8 +-
 .../pytorch/LeakyReLUImplCloneable.java | 14 +-
 .../pytorch/LeakyReLUImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/LeakyReLUOptions.java | 4 +-
 .../pytorch/LegacyTensorConstructor.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Lexer.java | 4 +-
 .../java/org/bytedeco/pytorch/Library.java | 242 +
 .../org/bytedeco/pytorch/LinAlgError.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Linear.java | 34 -
 .../java/org/bytedeco/pytorch/LinearImpl.java | 8 +-
 .../bytedeco/pytorch/LinearImplCloneable.java | 14 +-
 .../pytorch/LinearImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/LinearOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/ListComp.java | 10 +-
 .../ListElementConstReferenceTraits.java | 38 -
 .../java/org/bytedeco/pytorch/ListImpl.java | 33 -
 .../org/bytedeco/pytorch/ListLiteral.java | 14 +-
 .../pytorch/ListSingleElementType.java | 4 +-
 .../java/org/bytedeco/pytorch/ListType.java | 4 +-
 .../bytedeco/pytorch/LocalDispatchKeySet.java | 31 +
 .../bytedeco/pytorch/LocalResponseNorm.java | 34 -
 .../pytorch/LocalResponseNormImpl.java | 8 +-
 .../LocalResponseNormImplCloneable.java | 14 +-
 .../LocalResponseNormImplModuleHolder.java | 79 -
 .../pytorch/LocalResponseNormOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/LogSigmoid.java | 33 -
 .../org/bytedeco/pytorch/LogSigmoidImpl.java | 4 +-
 .../pytorch/LogSigmoidImplCloneable.java | 14 +-
 .../pytorch/LogSigmoidImplModuleHolder.java | 79 -
 .../java/org/bytedeco/pytorch/LogSoftmax.java | 34 -
 .../pytorch/LogSoftmaxFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/LogSoftmaxImpl.java | 8 +-
 .../pytorch/LogSoftmaxImplCloneable.java | 14 +-
 .../pytorch/LogSoftmaxImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/LogSoftmaxOptions.java | 4 +-
 .../org/bytedeco/pytorch/LongArrayRef.java | 32 +-
 .../pytorch/LongArrayRefOptional.java | 11 +-
 .../pytorch/LongElementReference.java | 42 +
 .../pytorch/LongExpandingArrayOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/LongList.java | 241 +
 .../bytedeco/pytorch/LongListIterator.java | 84 +
 .../org/bytedeco/pytorch/LongOptional.java | 5 +-
 ...rrayRef.java => LongOptionalArrayRef.java} | 59 +-
 .../bytedeco/pytorch/LongOptionalVector.java | 6 +-
 ...ctorBase.java => LongSmallVectorBase.java} | 12 +-
 .../pytorch/LongSmallVectorCommon.java | 49 +
 ...ctorImpl.java => LongSmallVectorImpl.java} | 25 +-
 .../org/bytedeco/pytorch/LongStringMap.java | 6 +-
 .../bytedeco/pytorch/LongVaryingShape.java | 10 +-
 .../java/org/bytedeco/pytorch/LongVector.java | 6 +-
 .../bytedeco/pytorch/LongVectorArrayRef.java | 133 +
 .../bytedeco/pytorch/LongVectorOptional.java | 5 +-
 ...ss_reduction_t.java => LossReduction.java} | 30 +-
 .../gen/java/org/bytedeco/pytorch/MNIST.java | 4 +-
 .../bytedeco/pytorch/MNISTBatchDataset.java | 4 +-
 .../org/bytedeco/pytorch/MNISTDataset.java | 4 +-
 .../pytorch/MNISTMapBatchDataset.java | 4 +-
 .../org/bytedeco/pytorch/MNISTMapDataset.java | 4 +-
 .../pytorch/MNISTRandomDataLoader.java | 4 +-
 .../pytorch/MNISTRandomDataLoaderBase.java | 5 +-
 ...{_Uninitialized.java => MPSHooksArgs.java} | 16 +-
 .../bytedeco/pytorch/MPSHooksInterface.java | 60 +
 .../java/org/bytedeco/pytorch/MSELoss.java | 34 -
 .../org/bytedeco/pytorch/MSELossImpl.java | 8 +-
 .../pytorch/MSELossImplCloneable.java | 14 +-
 .../pytorch/MSELossImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/MSELossOptions.java | 6 +-
 .../org/bytedeco/pytorch/MagicMethod.java | 19 +-
 .../org/bytedeco/pytorch/MakeIndices.java | 41 -
 .../bytedeco/pytorch/MarginRankingLoss.java | 34 -
 .../pytorch/MarginRankingLossImpl.java | 8 +-
 .../MarginRankingLossImplCloneable.java | 14 +-
 .../MarginRankingLossImplModuleHolder.java | 79 -
 .../pytorch/MarginRankingLossOptions.java | 6 +-
 .../org/bytedeco/pytorch/MatchTypeReturn.java | 4 +-
 .../org/bytedeco/pytorch/MatchedSchema.java | 34 +-
 .../java/org/bytedeco/pytorch/MaxPool1d.java | 34 -
 .../org/bytedeco/pytorch/MaxPool1dImpl.java | 10 +-
 .../bytedeco/pytorch/MaxPool1dImplBase.java | 8 +-
 .../pytorch/MaxPool1dImplCloneable.java | 14 +-
 .../pytorch/MaxPool1dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/MaxPool1dOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/MaxPool2d.java | 34 -
 .../org/bytedeco/pytorch/MaxPool2dImpl.java | 10 +-
 .../bytedeco/pytorch/MaxPool2dImplBase.java | 8 +-
 .../pytorch/MaxPool2dImplCloneable.java | 14 +-
 .../pytorch/MaxPool2dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/MaxPool2dOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/MaxPool3d.java | 34 -
 .../org/bytedeco/pytorch/MaxPool3dImpl.java | 10 +-
 .../bytedeco/pytorch/MaxPool3dImplBase.java | 8 +-
 .../pytorch/MaxPool3dImplCloneable.java | 14 +-
 .../pytorch/MaxPool3dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/MaxPool3dOptions.java | 4 +-
 .../org/bytedeco/pytorch/MaxUnpool1d.java | 34 -
 .../pytorch/MaxUnpool1dFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/MaxUnpool1dImpl.java | 8 +-
 .../bytedeco/pytorch/MaxUnpool1dImplBase.java | 8 +-
 .../pytorch/MaxUnpool1dImplCloneable.java | 14 +-
 .../pytorch/MaxUnpool1dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/MaxUnpool1dOptions.java | 4 +-
 .../org/bytedeco/pytorch/MaxUnpool2d.java | 34 -
 .../pytorch/MaxUnpool2dFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/MaxUnpool2dImpl.java | 8 +-
 .../bytedeco/pytorch/MaxUnpool2dImplBase.java | 8 +-
 .../pytorch/MaxUnpool2dImplCloneable.java | 14 +-
 .../pytorch/MaxUnpool2dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/MaxUnpool2dOptions.java | 4 +-
 .../org/bytedeco/pytorch/MaxUnpool3d.java | 34 -
 .../pytorch/MaxUnpool3dFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/MaxUnpool3dImpl.java | 8 +-
 .../bytedeco/pytorch/MaxUnpool3dImplBase.java | 8 +-
 .../pytorch/MaxUnpool3dImplCloneable.java | 14 +-
 .../pytorch/MaxUnpool3dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/MaxUnpool3dOptions.java | 4 +-
 .../MaybeOwnedTraitsGenericImplTensor.java | 54 +
 ...raits.java => MaybeOwnedTraitsTensor.java} | 20 +-
 .../pytorch/MemoryFormatOptional.java | 5 +-
 .../bytedeco/pytorch/MemoryFormatType.java | 4 +-
 .../pytorch/MemoryFormattEnumerationType.java | 4 +-
 .../pytorch/MemoryReportingInfoBase.java | 4 +-
 .../java/org/bytedeco/pytorch/MetaBase.java | 129 +
 .../gen/java/org/bytedeco/pytorch/Method.java | 12 +-
 .../org/bytedeco/pytorch/MethodOptional.java | 5 +-
 .../org/bytedeco/pytorch/MethodValue.java | 11 +-
 .../gen/java/org/bytedeco/pytorch/Mish.java | 33 -
 .../java/org/bytedeco/pytorch/MishImpl.java | 4 +-
 .../bytedeco/pytorch/MishImplCloneable.java | 14 +-
 .../pytorch/MishImplModuleHolder.java | 79 -
 .../java/org/bytedeco/pytorch/MobileCode.java | 20 +-
 .../gen/java/org/bytedeco/pytorch/Module.java | 2780 +-
 .../java/org/bytedeco/pytorch/ModuleDict.java | 44 -
 .../org/bytedeco/pytorch/ModuleDictImpl.java | 36 +-
 .../pytorch/ModuleDictImplCloneable.java | 14 +-
 .../pytorch/ModuleDictImplModuleHolder.java | 89 -
 .../org/bytedeco/pytorch/ModuleHolder.java | 93 -
 .../bytedeco/pytorch/ModuleInstanceInfo.java | 6 +-
 .../pytorch/ModuleInstanceInfoOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/ModuleList.java | 44 -
 .../org/bytedeco/pytorch/ModuleListImpl.java | 22 +-
 .../pytorch/ModuleListImplCloneable.java | 14 +-
 .../pytorch/ModuleListImplModuleHolder.java | 89 -
 .../org/bytedeco/pytorch/ModulePolicy.java | 6 +-
 .../org/bytedeco/pytorch/ModuleVector.java | 6 +-
 .../pytorch/MultiLabelMarginLoss.java | 34 -
 .../pytorch/MultiLabelMarginLossImpl.java | 8 +-
 .../MultiLabelMarginLossImplCloneable.java | 14 +-
 .../MultiLabelMarginLossImplModuleHolder.java | 79 -
 .../pytorch/MultiLabelMarginLossOptions.java | 6 +-
 .../pytorch/MultiLabelSoftMarginLoss.java | 34 -
 .../pytorch/MultiLabelSoftMarginLossImpl.java | 8 +-
 ...MultiLabelSoftMarginLossImplCloneable.java | 14 +-
 ...tiLabelSoftMarginLossImplModuleHolder.java | 79 -
 .../MultiLabelSoftMarginLossOptions.java | 6 +-
 .../org/bytedeco/pytorch/MultiMarginLoss.java | 34 -
 .../bytedeco/pytorch/MultiMarginLossImpl.java | 8 +-
 .../pytorch/MultiMarginLossImplCloneable.java | 14 +-
 .../MultiMarginLossImplModuleHolder.java | 79 -
 .../pytorch/MultiMarginLossOptions.java | 6 +-
 .../bytedeco/pytorch/MultiheadAttention.java | 34 -
 .../MultiheadAttentionForwardFuncOptions.java | 4 +-
 .../pytorch/MultiheadAttentionImpl.java | 17 +-
 .../MultiheadAttentionImplCloneable.java | 14 +-
 .../MultiheadAttentionImplModuleHolder.java | 79 -
 .../pytorch/MultiheadAttentionOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/NLLLoss.java | 34 -
 .../org/bytedeco/pytorch/NLLLossImpl.java | 8 +-
 .../pytorch/NLLLossImplCloneable.java | 14 +-
 .../pytorch/NLLLossImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/NLLLossOptions.java | 6 +-
 .../org/bytedeco/pytorch/NameMangler.java | 4 +-
 .../org/bytedeco/pytorch/NamedAnyModule.java | 69 -
 .../bytedeco/pytorch/NamedBufferPolicy.java | 42 -
 .../org/bytedeco/pytorch/NamedIValue.java | 4 +-
 ...butePolicy.java => NamedIValuePolicy.java} | 22 +-
 .../org/bytedeco/pytorch/NamedJitModule.java | 8 +-
 ...ePolicy.java => NamedJitModulePolicy.java} | 22 +-
 .../org/bytedeco/pytorch/NamedTensor.java | 6 +-
 .../org/bytedeco/pytorch/NamedTensorMeta.java | 4 +-
 .../pytorch/NamedTensorMetaInterface.java | 4 +-
 ...eterPolicy.java => NamedTensorPolicy.java} | 22 +-
 .../pytorch/NamedTupleConstructor.java | 11 +-
 .../java/org/bytedeco/pytorch/NamedType.java | 4 +-
 .../java/org/bytedeco/pytorch/NamedValue.java | 4 +-
 .../bytedeco/pytorch/NamedValueArrayRef.java | 15 +-
 .../bytedeco/pytorch/NamedValueOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/NamesMode.java | 4 +-
 .../org/bytedeco/pytorch/NativeResolver.java | 8 +-
 .../bytedeco/pytorch/NestedTensorImpl.java | 4 +-
 .../org/bytedeco/pytorch/NoGradGuard.java | 4 +-
 .../org/bytedeco/pytorch/NoNamesGuard.java | 4 +-
 .../org/bytedeco/pytorch/NoTF32Guard.java | 4 +-
 .../java/org/bytedeco/pytorch/NoTarget.java | 4 +-
 .../pytorch/NoTracerDispatchMode.java | 37 -
 .../gen/java/org/bytedeco/pytorch/NoWarn.java | 38 -
 .../gen/java/org/bytedeco/pytorch/Node.java | 12 +-
 ...unctionPreHookMap.java => NodeIntMap.java} | 22 +-
 .../{TokenTrieVector.java => NodeSet.java} | 21 +-
 ...{Suspend.java => NodeSmallVectorBase.java} | 16 +-
 .../pytorch/NodeSmallVectorCommon.java | 49 +
 .../bytedeco/pytorch/NodeSmallVectorImpl.java | 71 +
 .../java/org/bytedeco/pytorch/NoneType.java | 4 +-
 .../org/bytedeco/pytorch/NoneTypePtr.java | 4 +-
 .../org/bytedeco/pytorch/Nonlinearity.java | 74 +
 .../bytedeco/pytorch/NonlinearityType.java | 72 -
 .../pytorch/NormalizeFuncOptions.java | 4 +-
 .../bytedeco/pytorch/NotImplementedError.java | 4 +-
 .../java/org/bytedeco/pytorch/NumberType.java | 6 +-
 .../org/bytedeco/pytorch/NumberTypePtr.java | 4 +-
 .../org/bytedeco/pytorch/ORTHooksArgs.java | 29 +
 .../bytedeco/pytorch/ORTHooksInterface.java | 45 +
 .../pytorch/OnnxfiBackendSystemError.java | 4 +-
 .../pytorch/OpRegistrationListener.java | 4 +-
 .../pytorch/OpTableOffsetAndMask.java | 39 -
 .../pytorch/OpaqueOptionalTensorRef.java | 46 +
 .../org/bytedeco/pytorch/OperandInfo.java | 95 +
 .../java/org/bytedeco/pytorch/Operation.java | 4 +-
 .../bytedeco/pytorch/OperationCreator.java | 4 +-
 .../java/org/bytedeco/pytorch/Operator.java | 8 +-
 .../org/bytedeco/pytorch/OperatorHandle.java | 10 +-
 .../pytorch/OperatorHandleOptional.java | 5 +-
 .../org/bytedeco/pytorch/OperatorKernel.java | 42 +-
 .../org/bytedeco/pytorch/OperatorName.java | 4 +-
 .../pytorch/OperatorNameOptional.java | 5 +-
 .../bytedeco/pytorch/OperatorNameView.java | 32 -
 .../bytedeco/pytorch/OperatorOptional.java | 5 +-
 .../pytorch/OperatorOptionalVector.java | 6 +-
 .../org/bytedeco/pytorch/OperatorSet.java | 4 +-
 .../org/bytedeco/pytorch/OperatorVector.java | 12 +-
 .../java/org/bytedeco/pytorch/Optimizer.java | 8 +-
 .../OptimizerCloneableAdagradOptions.java | 4 +-
 .../OptimizerCloneableAdagradParamState.java | 4 +-
 .../OptimizerCloneableAdamOptions.java | 4 +-
 .../OptimizerCloneableAdamParamState.java | 4 +-
 .../OptimizerCloneableAdamWOptions.java | 4 +-
 .../OptimizerCloneableAdamWParamState.java | 4 +-
 .../OptimizerCloneableLBFGSOptions.java | 4 +-
 .../OptimizerCloneableLBFGSParamState.java | 4 +-
 .../OptimizerCloneableRMSpropOptions.java | 4 +-
 .../OptimizerCloneableRMSpropParamState.java | 4 +-
 .../pytorch/OptimizerCloneableSGDOptions.java | 4 +-
 .../OptimizerCloneableSGDParamState.java | 4 +-
 .../bytedeco/pytorch/OptimizerOptions.java | 4 +-
 .../bytedeco/pytorch/OptimizerParamGroup.java | 12 +-
 .../pytorch/OptimizerParamGroupVector.java | 6 +-
 .../bytedeco/pytorch/OptimizerParamState.java | 4 +-
 .../bytedeco/pytorch/OptionalDeviceGuard.java | 10 +-
 .../pytorch/OptionalSingleElementType.java | 4 +-
 .../bytedeco/pytorch/OptionalTensorRef.java | 4 +-
 .../org/bytedeco/pytorch/OptionalType.java | 4 +-
 .../bytedeco/pytorch/OutOfMemoryError.java | 4 +-
 .../org/bytedeco/pytorch/OutputArchive.java | 6 +-
 .../bytedeco/pytorch/OwnedSourceRange.java | 29 -
 .../pytorch/PODLocalDispatchKeySet.java | 54 +
 .../gen/java/org/bytedeco/pytorch/PReLU.java | 34 -
 .../java/org/bytedeco/pytorch/PReLUImpl.java | 8 +-
 .../bytedeco/pytorch/PReLUImplCloneable.java | 14 +-
 .../pytorch/PReLUImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/PReLUOptions.java | 4 +-
 .../org/bytedeco/pytorch/PackedSequence.java | 12 +-
 .../org/bytedeco/pytorch/PadFuncOptions.java | 6 +-
 .../{pad_mode_t.java => PaddingMode.java} | 36 +-
 .../bytedeco/pytorch/PairwiseDistance.java | 34 -
 .../pytorch/PairwiseDistanceImpl.java | 8 +-
 .../PairwiseDistanceImplCloneable.java | 14 +-
 .../PairwiseDistanceImplModuleHolder.java | 79 -
 .../pytorch/PairwiseDistanceOptions.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Param.java | 10 +-
 .../java/org/bytedeco/pytorch/ParamList.java | 38 +
 .../bytedeco/pytorch/ParamListIterator.java | 35 +
 .../org/bytedeco/pytorch/ParameterDict.java | 40 -
 .../bytedeco/pytorch/ParameterDictImpl.java | 14 +-
 .../pytorch/ParameterDictImplCloneable.java | 14 +-
 .../ParameterDictImplModuleHolder.java | 89 -
 .../org/bytedeco/pytorch/ParameterList.java | 39 -
 .../bytedeco/pytorch/ParameterListImpl.java | 12 +-
 .../pytorch/ParameterListImplCloneable.java | 14 +-
 .../ParameterListImplModuleHolder.java | 89 -
 .../org/bytedeco/pytorch/ParameterPolicy.java | 6 +-
 .../gen/java/org/bytedeco/pytorch/Pass.java | 10 +-
 .../java/org/bytedeco/pytorch/Pickler.java | 26 +-
 .../org/bytedeco/pytorch/PixelShuffle.java | 34 -
 .../bytedeco/pytorch/PixelShuffleImpl.java | 6 +-
 .../pytorch/PixelShuffleImplCloneable.java | 14 +-
 .../pytorch/PixelShuffleImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/PixelShuffleOptions.java | 4 +-
 .../org/bytedeco/pytorch/PixelUnshuffle.java | 34 -
 .../bytedeco/pytorch/PixelUnshuffleImpl.java | 6 +-
 .../pytorch/PixelUnshuffleImplCloneable.java | 14 +-
 .../PixelUnshuffleImplModuleHolder.java | 79 -
 .../pytorch/PixelUnshuffleOptions.java | 4 +-
 .../pytorch/PlacementDeleteContext.java | 4 +-
 .../org/bytedeco/pytorch/PlacementDtor.java | 4 +-
 .../org/bytedeco/pytorch/PoissonNLLLoss.java | 34 -
 .../bytedeco/pytorch/PoissonNLLLossImpl.java | 8 +-
 .../pytorch/PoissonNLLLossImplCloneable.java | 14 +-
 .../PoissonNLLLossImplModuleHolder.java | 79 -
 .../pytorch/PoissonNLLLossOptions.java | 6 +-
 .../org/bytedeco/pytorch/PrintDepsTable.java | 42 -
 .../java/org/bytedeco/pytorch/PrintValue.java | 11 +-
 .../org/bytedeco/pytorch/ProfileIValueOp.java | 10 +-
 .../org/bytedeco/pytorch/ProfilerConfig.java | 82 +
 .../java/org/bytedeco/pytorch/Property.java | 10 +-
 .../org/bytedeco/pytorch/PropertyList.java | 38 +
 .../pytorch/PropertyListIterator.java | 35 +
 .../bytedeco/pytorch/PropertyListMaybe.java | 36 +
 .../bytedeco/pytorch/PropertyPropBase.java | 31 -
 .../org/bytedeco/pytorch/PropertyVector.java | 6 +-
 .../org/bytedeco/pytorch/PyInterpreter.java | 44 +
 .../bytedeco/pytorch/PyInterpreterVTable.java | 176 +
 .../org/bytedeco/pytorch/PyObjectHolder.java | 20 +-
 .../bytedeco/pytorch/PyObjectHolderPtr.java | 150 +
 .../org/bytedeco/pytorch/PyObjectType.java | 4 +-
 .../org/bytedeco/pytorch/PyObjectTypePtr.java | 4 +-
 .../bytedeco/pytorch/PyTorchStreamReader.java | 115 +
 .../bytedeco/pytorch/PythonDispatcherTLS.java | 42 +
 .../java/org/bytedeco/pytorch/PythonOp.java | 4 +-
 .../org/bytedeco/pytorch/PythonPrint.java | 49 -
 .../pytorch/PythonTorchFunctionTLS.java | 50 +
 .../org/bytedeco/pytorch/QEngineVector.java | 6 +-
 .../org/bytedeco/pytorch/QSchemeType.java | 4 +-
 .../org/bytedeco/pytorch/QSchemeTypePtr.java | 4 +-
 ...edTypeProperties.java => QTensorImpl.java} | 10 +-
 .../org/bytedeco/pytorch/QualifiedName.java | 8 +-
 .../pytorch/QualifiedNameOptional.java | 5 +-
 .../java/org/bytedeco/pytorch/Quantizer.java | 72 +-
 .../org/bytedeco/pytorch/QuantizerPtr.java | 150 +
 .../org/bytedeco/pytorch/QuantizerType.java | 4 +-
 .../bytedeco/pytorch/QuantizerTypePtr.java | 4 +-
 .../java/org/bytedeco/pytorch/RMSprop.java | 12 +-
 .../org/bytedeco/pytorch/RMSpropOptions.java | 9 +-
 .../bytedeco/pytorch/RMSpropParamState.java | 9 +-
 .../gen/java/org/bytedeco/pytorch/RNN.java | 34 -
 ...ions_base_mode_t.java => RNNBaseMode.java} | 36 +-
 .../java/org/bytedeco/pytorch/RNNCell.java | 34 -
 .../org/bytedeco/pytorch/RNNCellImpl.java | 10 +-
 .../org/bytedeco/pytorch/RNNCellImplBase.java | 6 +-
 .../pytorch/RNNCellImplCloneable.java | 14 +-
 .../pytorch/RNNCellImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/RNNCellOptions.java | 6 +-
 .../bytedeco/pytorch/RNNCellOptionsBase.java | 4 +-
 .../java/org/bytedeco/pytorch/RNNImpl.java | 18 +-
 .../org/bytedeco/pytorch/RNNImplBase.java | 8 +-
 .../bytedeco/pytorch/RNNImplCloneable.java | 14 +-
 .../bytedeco/pytorch/RNNImplModuleHolder.java | 79 -
 ...nlinearity_t.java => RNNNonlinearity.java} | 24 +-
 .../java/org/bytedeco/pytorch/RNNOptions.java | 6 +-
 .../org/bytedeco/pytorch/RNNOptionsBase.java | 10 +-
 .../gen/java/org/bytedeco/pytorch/RReLU.java | 34 -
 .../bytedeco/pytorch/RReLUFuncOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/RReLUImpl.java | 8 +-
 .../bytedeco/pytorch/RReLUImplCloneable.java | 14 +-
 .../pytorch/RReLUImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/RReLUOptions.java | 4 +-
 .../org/bytedeco/pytorch/RRefInterface.java | 33 +-
 .../bytedeco/pytorch/RRefInterfacePtr.java | 150 +
 .../pytorch/RRefSingleElementType.java | 4 +-
 .../java/org/bytedeco/pytorch/RRefType.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Raise.java | 10 +-
 .../org/bytedeco/pytorch/RandomSampler.java | 4 +-
 .../java/org/bytedeco/pytorch/RangeValue.java | 10 +-
 .../gen/java/org/bytedeco/pytorch/ReLU.java | 34 -
 .../gen/java/org/bytedeco/pytorch/ReLU6.java | 34 -
 .../java/org/bytedeco/pytorch/ReLU6Impl.java | 8 +-
 .../bytedeco/pytorch/ReLU6ImplCloneable.java | 14 +-
 .../pytorch/ReLU6ImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/ReLU6Options.java | 4 +-
 .../java/org/bytedeco/pytorch/ReLUImpl.java | 8 +-
 .../bytedeco/pytorch/ReLUImplCloneable.java | 14 +-
 .../pytorch/ReLUImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/ReLUOptions.java | 4 +-
 .../pytorch/ReadAdapterInterface.java | 18 +-
 .../java/org/bytedeco/pytorch/ReadyQueue.java | 26 +
 .../org/bytedeco/pytorch/RecordFunction.java | 16 +-
 .../RecordFunctionCallbackHandleVector.java | 4 +-
 .../pytorch/RecordFunctionCallbacksEntry.java | 4 +-
 .../bytedeco/pytorch/RecordFunctionGuard.java | 4 +-
 .../pytorch/RecordFunctionHandleIntList.java | 46 +
 .../pytorch/RecordFunctionHandleIntPair.java | 40 +
 .../bytedeco/pytorch/RecordFunctionTLS.java | 4 +-
 .../org/bytedeco/pytorch/RecordScopeSet.java | 7 +-
 .../org/bytedeco/pytorch/ReflectionPad1d.java | 34 -
 .../bytedeco/pytorch/ReflectionPad1dImpl.java | 8 +-
 .../pytorch/ReflectionPad1dImplBase.java | 8 +-
 .../pytorch/ReflectionPad1dImplCloneable.java | 14 +-
 .../ReflectionPad1dImplModuleHolder.java | 79 -
 .../pytorch/ReflectionPad1dOptions.java | 4 +-
 .../org/bytedeco/pytorch/ReflectionPad2d.java | 34 -
 .../bytedeco/pytorch/ReflectionPad2dImpl.java | 8 +-
 .../pytorch/ReflectionPad2dImplBase.java | 8 +-
 .../pytorch/ReflectionPad2dImplCloneable.java | 14 +-
 .../ReflectionPad2dImplModuleHolder.java | 79 -
 .../pytorch/ReflectionPad2dOptions.java | 4 +-
 .../org/bytedeco/pytorch/ReflectionPad3d.java | 34 -
 .../bytedeco/pytorch/ReflectionPad3dImpl.java | 8 +-
 .../pytorch/ReflectionPad3dImplBase.java | 8 +-
 .../pytorch/ReflectionPad3dImplCloneable.java | 14 +-
 .../ReflectionPad3dImplModuleHolder.java | 79 -
 .../pytorch/ReflectionPad3dOptions.java | 4 +-
 .../bytedeco/pytorch/RegisterOperators.java | 119 +-
 .../pytorch/RegistrationHandleRAII.java | 4 +-
 .../pytorch/RegistrationListenerList.java | 4 +-
 .../bytedeco/pytorch/ReplicationPad1d.java | 34 -
 .../pytorch/ReplicationPad1dImpl.java | 8 +-
 .../pytorch/ReplicationPad1dImplBase.java | 8 +-
 .../ReplicationPad1dImplCloneable.java | 14 +-
 .../ReplicationPad1dImplModuleHolder.java | 79 -
 .../pytorch/ReplicationPad1dOptions.java | 4 +-
 .../bytedeco/pytorch/ReplicationPad2d.java | 34 -
 .../pytorch/ReplicationPad2dImpl.java | 8 +-
 .../pytorch/ReplicationPad2dImplBase.java | 8 +-
 .../ReplicationPad2dImplCloneable.java | 14 +-
 .../ReplicationPad2dImplModuleHolder.java | 79 -
 .../pytorch/ReplicationPad2dOptions.java | 4 +-
 .../bytedeco/pytorch/ReplicationPad3d.java | 34 -
 .../pytorch/ReplicationPad3dImpl.java | 8 +-
 .../pytorch/ReplicationPad3dImplBase.java | 8 +-
 .../ReplicationPad3dImplCloneable.java | 14 +-
 .../ReplicationPad3dImplModuleHolder.java | 79 -
 .../pytorch/ReplicationPad3dOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/Resolver.java | 8 +-
 .../org/bytedeco/pytorch/ResolverVector.java | 12 +-
 ...ursiveMethodCallError.java => Result.java} | 12 +-
 .../gen/java/org/bytedeco/pytorch/Return.java | 10 +-
 .../gen/java/org/bytedeco/pytorch/SELU.java | 34 -
 .../java/org/bytedeco/pytorch/SELUImpl.java | 8 +-
 .../bytedeco/pytorch/SELUImplCloneable.java | 14 +-
 .../pytorch/SELUImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/SELUOptions.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/SGD.java | 8 +-
 .../java/org/bytedeco/pytorch/SGDOptions.java | 9 +-
 .../org/bytedeco/pytorch/SGDParamState.java | 9 +-
 .../org/bytedeco/pytorch/SafePyHandle.java | 48 +
 .../org/bytedeco/pytorch/SafePyObject.java | 50 +
 .../java/org/bytedeco/pytorch/Sampler.java | 4 +-
 .../pytorch/SavedTensorDefaultHooks.java | 4 +-
 .../pytorch/SavedTensorDefaultHooksTLS.java | 4 +-
 .../org/bytedeco/pytorch/SavedVariable.java | 4 +-
 .../pytorch/SavedVariableArrayRef.java | 17 +-
 .../bytedeco/pytorch/SavedVariableHooks.java | 4 +-
 .../bytedeco/pytorch/SavedVariableVector.java | 6 +-
 .../gen/java/org/bytedeco/pytorch/Scalar.java | 13 +-
 .../org/bytedeco/pytorch/ScalarArrayRef.java | 15 +-
 .../org/bytedeco/pytorch/ScalarOptional.java | 5 +-
 .../bytedeco/pytorch/ScalarTypeArrayRef.java | 17 +-
 .../pytorch/ScalarTypeEnumerationType.java | 4 +-
 .../bytedeco/pytorch/ScalarTypeOptional.java | 5 +-
 .../org/bytedeco/pytorch/ScalarTypeType.java | 4 +-
 .../bytedeco/pytorch/ScalarTypeTypePtr.java | 4 +-
 .../bytedeco/pytorch/ScalarTypeVector.java | 6 +-
 .../org/bytedeco/pytorch/SchemaArgument.java | 6 +-
 .../java/org/bytedeco/pytorch/SchemaInfo.java | 4 +-
 .../pytorch/SchemaRegistrationHandleRAII.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Scope.java | 4 +-
 .../org/bytedeco/pytorch/ScopeOptional.java | 5 +-
 .../pytorch/ScriptModuleSerializer.java | 38 -
 .../bytedeco/pytorch/ScriptTypeParser.java | 58 +
 .../gen/java/org/bytedeco/pytorch/Select.java | 10 +-
 .../gen/java/org/bytedeco/pytorch/Self.java | 8 +-
 .../java/org/bytedeco/pytorch/Sequential.java | 48 -
 .../org/bytedeco/pytorch/SequentialImpl.java | 408 +-
 .../pytorch/SequentialImplCloneable.java | 14 +-
 .../pytorch/SequentialImplModuleHolder.java | 89 -
 .../bytedeco/pytorch/SequentialSampler.java | 4 +-
 .../pytorch/SerializationStorageContext.java | 4 +-
 .../org/bytedeco/pytorch/ShapeSymbol.java | 11 +-
 .../bytedeco/pytorch/ShapeSymbolVector.java | 6 +-
 .../pytorch/ShapeSymbolVectorOptional.java | 5 +-
 .../pytorch/SharedAnyModuleVector.java | 16 +-
 ...Vector.java => SharedClassTypeVector.java} | 34 +-
 ...ctor.java => SharedFunctionPreVector.java} | 34 +-
 .../bytedeco/pytorch/SharedModuleVector.java | 16 +-
 .../bytedeco/pytorch/SharedParserData.java | 4 +-
 ...tor.java => SharedSugaredValueVector.java} | 34 +-
 .../java/org/bytedeco/pytorch/SharedType.java | 4 +-
 .../org/bytedeco/pytorch/ShortArrayRef.java | 21 +-
 .../gen/java/org/bytedeco/pytorch/SiLU.java | 33 -
 .../java/org/bytedeco/pytorch/SiLUImpl.java | 4 +-
 .../bytedeco/pytorch/SiLUImplCloneable.java | 14 +-
 .../pytorch/SiLUImplModuleHolder.java | 79 -
 .../java/org/bytedeco/pytorch/Sigmoid.java | 33 -
 .../org/bytedeco/pytorch/SigmoidImpl.java | 4 +-
 .../pytorch/SigmoidImplCloneable.java | 14 +-
 .../pytorch/SigmoidImplModuleHolder.java | 79 -
 .../java/org/bytedeco/pytorch/SimpleSelf.java | 12 +-
 .../org/bytedeco/pytorch/SimpleValue.java | 25 +-
 .../bytedeco/pytorch/SingletonTypePtr.java | 4 +-
 .../org/bytedeco/pytorch/SizeTArrayRef.java | 26 +-
 .../pytorch/SizeTMatchedSchemaPair.java | 40 +
 .../org/bytedeco/pytorch/SizeTOptional.java | 5 +-
 .../org/bytedeco/pytorch/SizeTVector.java | 6 +-
 .../bytedeco/pytorch/SizeTVectorOptional.java | 5 +-
 .../org/bytedeco/pytorch/SizesAndStrides.java | 99 +
 .../gen/java/org/bytedeco/pytorch/Slice.java | 4 +-
 .../java/org/bytedeco/pytorch/SliceExpr.java | 10 +-
 .../java/org/bytedeco/pytorch/SliceValue.java | 4 +-
 .../java/org/bytedeco/pytorch/SlotCursor.java | 8 +-
 .../org/bytedeco/pytorch/SmallNodeVector.java | 51 +
 .../org/bytedeco/pytorch/SmoothL1Loss.java | 34 -
 .../bytedeco/pytorch/SmoothL1LossImpl.java | 8 +-
 .../pytorch/SmoothL1LossImplCloneable.java | 14 +-
 .../pytorch/SmoothL1LossImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/SmoothL1LossOptions.java | 6 +-
 .../org/bytedeco/pytorch/SoftMarginLoss.java | 34 -
 .../bytedeco/pytorch/SoftMarginLossImpl.java | 8 +-
 .../pytorch/SoftMarginLossImplCloneable.java | 14 +-
 .../SoftMarginLossImplModuleHolder.java | 79 -
 .../pytorch/SoftMarginLossOptions.java | 6 +-
 .../java/org/bytedeco/pytorch/Softmax.java | 34 -
 .../java/org/bytedeco/pytorch/Softmax2d.java | 33 -
 .../org/bytedeco/pytorch/Softmax2dImpl.java | 4 +-
 .../pytorch/Softmax2dImplCloneable.java | 14 +-
 .../pytorch/Softmax2dImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/SoftmaxFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/SoftmaxImpl.java | 8 +-
 .../pytorch/SoftmaxImplCloneable.java | 14 +-
 .../pytorch/SoftmaxImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/SoftmaxOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/Softmin.java | 34 -
 .../bytedeco/pytorch/SoftminFuncOptions.java | 4 +-
 .../org/bytedeco/pytorch/SoftminImpl.java | 8 +-
 .../pytorch/SoftminImplCloneable.java | 14 +-
 .../pytorch/SoftminImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/SoftminOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/Softplus.java | 34 -
 .../org/bytedeco/pytorch/SoftplusImpl.java | 8 +-
 .../pytorch/SoftplusImplCloneable.java | 14 +-
 .../pytorch/SoftplusImplModuleHolder.java | 79 -
 .../org/bytedeco/pytorch/SoftplusOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/Softshrink.java | 34 -
 .../org/bytedeco/pytorch/SoftshrinkImpl.java | 8 +-
 .../pytorch/SoftshrinkImplCloneable.java | 14 +-
 .../pytorch/SoftshrinkImplModuleHolder.java | 79 -
 .../bytedeco/pytorch/SoftshrinkOptions.java | 4 +-
 .../java/org/bytedeco/pytorch/Softsign.java | 33 -
 .../org/bytedeco/pytorch/SoftsignImpl.java | 4 +-
 .../pytorch/SoftsignImplCloneable.java | 14 +-
 .../pytorch/SoftsignImplModuleHolder.java | 79 -
 .../gen/java/org/bytedeco/pytorch/Source.java | 4 +-
 .../org/bytedeco/pytorch/SourceLocation.java | 4 +-
 .../org/bytedeco/pytorch/SourceRange.java | 6 +-
 .../pytorch/SourceRangeDeserializer.java | 39 -
 .../bytedeco/pytorch/SourceRangeHasher.java | 4 +-
 .../bytedeco/pytorch/SourceRangeOptional.java | 5 +-
 .../pytorch/SourceRangeSerializer.java | 24 -
 .../pytorch/SourceRangeUnpickler.java | 12 +-
 .../bytedeco/pytorch/SpecialFormValue.java | 4 +-
 .../org/bytedeco/pytorch/SplitUntil32Bit.java | 67 +
 .../java/org/bytedeco/pytorch/StackEntry.java | 8 +-
 .../bytedeco/pytorch/StackEntryVector.java | 6 +-
 .../bytedeco/pytorch/StackEntryVector_V.java | 26 -
 .../java/org/bytedeco/pytorch/Starred.java | 10 +-
 .../gen/java/org/bytedeco/pytorch/StepLR.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Stmt.java | 10 +-
 .../java/org/bytedeco/pytorch/StmtList.java | 38 +
 .../bytedeco/pytorch/StmtListIterator.java | 35 +
 .../java/org/bytedeco/pytorch/Storage.java | 12 +-
 .../org/bytedeco/pytorch/StorageImpl.java | 6 +-
 .../org/bytedeco/pytorch/StorageImplPtr.java | 150 +
 .../org/bytedeco/pytorch/StorageType.java | 4 +-
 .../org/bytedeco/pytorch/StorageTypePtr.java | 4 +-
 .../gen/java/org/bytedeco/pytorch/Stream.java | 8 +-
 .../org/bytedeco/pytorch/StreamData3.java | 4 +-
 .../bytedeco/pytorch/StreamData3Holder.java | 30 -
 .../java/org/bytedeco/pytorch/StreamHash.java | 37 -
 .../org/bytedeco/pytorch/StreamObjType.java | 4 +-
 .../bytedeco/pytorch/StreamObjTypePtr.java | 4 +-
 .../org/bytedeco/pytorch/StreamOptional.java | 5 +-
 .../org/bytedeco/pytorch/StreamSampler.java | 4 +-
 .../java/org/bytedeco/pytorch/StreamSet.java | 46 +
 .../gen/java/org/bytedeco/pytorch/Stride.java | 6 +-
 .../org/bytedeco/pytorch/StrideArrayRef.java | 17 +-
 .../org/bytedeco/pytorch/StrideOptional.java | 5 +-
 .../bytedeco/pytorch/StrideVaryingShape.java | 6 +-
.../org/bytedeco/pytorch/StrideVector.java | 6 +- .../pytorch/StrideVectorOptional.java | 5 +- .../bytedeco/pytorch/StringAnyModuleDict.java | 20 +- .../pytorch/StringAnyModuleDictItem.java | 4 +- .../StringAnyModuleDictItemVector.java | 47 + .../bytedeco/pytorch/StringAnyModulePair.java | 4 +- ...Vector.java => StringAnyModuleVector.java} | 28 +- .../org/bytedeco/pytorch/StringArrayRef.java | 33 +- .../org/bytedeco/pytorch/StringBoolMap.java | 4 +- .../org/bytedeco/pytorch/StringCordView.java | 4 +- .../bytedeco/pytorch/StringFunctionMap.java | 4 +- .../pytorch/StringGenericListDict.java | 42 +- .../org/bytedeco/pytorch/StringIValueMap.java | 4 +- .../org/bytedeco/pytorch/StringIntMap.java | 4 +- .../org/bytedeco/pytorch/StringLiteral.java | 10 +- .../org/bytedeco/pytorch/StringLongMap.java | 4 +- .../pytorch/StringLongStringMapMap.java | 4 +- .../bytedeco/pytorch/StringLongVector.java | 4 +- .../bytedeco/pytorch/StringModuleDict.java | 18 +- .../pytorch/StringModuleDictItem.java | 8 +- .../pytorch/StringModuleDictItemVector.java | 47 + .../bytedeco/pytorch/StringModulePair.java | 4 +- ...airVector.java => StringModuleVector.java} | 28 +- .../org/bytedeco/pytorch/StringOptional.java | 5 +- .../java/org/bytedeco/pytorch/StringSet.java | 5 +- .../pytorch/StringSharedModuleDict.java | 26 +- .../pytorch/StringSharedModuleDictItem.java | 18 +- .../StringSharedModuleDictItemVector.java | 47 + .../pytorch/StringSharedModulePair.java | 10 +- ...tor.java => StringSharedModuleVector.java} | 28 +- .../org/bytedeco/pytorch/StringSizeTMap.java | 4 +- .../org/bytedeco/pytorch/StringStringMap.java | 4 +- .../bytedeco/pytorch/StringTensorDict.java | 16 +- .../pytorch/StringTensorDictItem.java | 8 +- .../pytorch/StringTensorDictItemVector.java | 47 + .../org/bytedeco/pytorch/StringTensorMap.java | 6 +- .../bytedeco/pytorch/StringTensorPair.java | 4 +- ...airVector.java => StringTensorVector.java} | 30 +- .../java/org/bytedeco/pytorch/StringType.java | 4 +- .../org/bytedeco/pytorch/StringTypePtr.java | 4 +- .../org/bytedeco/pytorch/StringValueMap.java | 4 +- .../org/bytedeco/pytorch/StringVector.java | 6 +- .../pytorch/StringVectorOptional.java | 5 +- .../java/org/bytedeco/pytorch/StringView.java | 13 +- .../bytedeco/pytorch/StrongFunctionPtr.java | 32 - .../org/bytedeco/pytorch/StrongTypePtr.java | 4 +- .../java/org/bytedeco/pytorch/Subscript.java | 15 +- .../bytedeco/pytorch/SugaredEnumClass.java | 14 +- .../bytedeco/pytorch/SugaredTupleValue.java | 20 +- .../org/bytedeco/pytorch/SugaredValue.java | 29 +- .../java/org/bytedeco/pytorch/SymBool.java | 12 +- .../org/bytedeco/pytorch/SymDimVector.java | 23 +- .../pytorch/SymDimVectorOptional.java | 5 +- .../java/org/bytedeco/pytorch/SymFloat.java | 12 +- .../org/bytedeco/pytorch/SymFloatType.java | 4 +- .../gen/java/org/bytedeco/pytorch/SymInt.java | 20 +- .../{SymIntRef.java => SymIntArrayRef.java} | 43 +- .../pytorch/SymIntArrayRefOptional.java | 13 +- .../org/bytedeco/pytorch/SymIntOptional.java | 5 +- ...orBase.java => SymIntSmallVectorBase.java} | 12 +- .../pytorch/SymIntSmallVectorCommon.java | 53 + ...orImpl.java => SymIntSmallVectorImpl.java} | 33 +- .../java/org/bytedeco/pytorch/SymIntType.java | 4 +- .../org/bytedeco/pytorch/SymIntVector.java | 6 +- .../java/org/bytedeco/pytorch/SymNode.java | 150 + .../{SymNodeRef.java => SymNodeArrayRef.java} | 57 +- .../org/bytedeco/pytorch/SymNodeImpl.java | 64 +- .../gen/java/org/bytedeco/pytorch/Symbol.java | 4 +- .../org/bytedeco/pytorch/SymbolArrayRef.java | 17 +- 
.../java/org/bytedeco/pytorch/SymbolHash.java | 39 - .../java/org/bytedeco/pytorch/SymbolSet.java | 5 +- .../org/bytedeco/pytorch/SymbolVector.java | 6 +- .../org/bytedeco/pytorch/SymbolicShape.java | 14 +- .../bytedeco/pytorch/T_DataPtrSizeT_T.java | 34 + .../org/bytedeco/pytorch/T_DoubleLong_T.java | 36 + .../java/org/bytedeco/pytorch/T_IntInt_T.java | 36 + .../org/bytedeco/pytorch/T_LongLong_T.java | 36 + ...> T_PackedSequenceT_TensorTensor_T_T.java} | 16 +- ...ple.java => T_PackedSequenceTensor_T.java} | 14 +- .../org/bytedeco/pytorch/T_StringLong_T.java | 36 + ...eTTuple.java => T_StringSizeTSizeT_T.java} | 20 +- ...java => T_StringSizeTSizeT_TOptional.java} | 19 +- ...edTensorMaybeOwnedTensorMaybeOwned_T.java} | 20 +- ...T_TensorMaybeOwnedTensorMaybeOwned_T.java} | 18 +- .../pytorch/T_TensorT_TensorTensor_T_T.java | 36 + ...e.java => T_TensorTensorDoubleLong_T.java} | 24 +- ...va => T_TensorTensorLongLongTensor_T.java} | 26 +- ... => T_TensorTensorTensorTensorLong_T.java} | 26 +- ...nsorTensorTensorTensorTensorTensor_T.java} | 30 +- ...nsorTensorTensorTensorTensorTensor_T.java} | 28 +- ...> T_TensorTensorTensorTensorTensor_T.java} | 26 +- .../T_TensorTensorTensorTensorVector_T.java | 40 + ...java => T_TensorTensorTensorTensor_T.java} | 24 +- ...TensorTensorsLongLongLongLongTensor_T.java | 50 + ...Tuple.java => T_TensorTensorTensor_T.java} | 22 +- .../T_TensorTensorVectorTensorVector_T.java | 38 + .../pytorch/T_TensorTensorVector_T.java | 36 + ...TensorTuple.java => T_TensorTensor_T.java} | 20 +- .../pytorch/T_TensorTensor_TOptional.java | 35 + ...ensorVectorTensorVectorTensorVector_T.java | 42 + .../pytorch/T_TensorVectorTensor_T.java | 36 + .../org/bytedeco/pytorch/T_TypePtrLong_T.java | 36 + .../pytorch/T_TypePtrLong_TOptional.java | 35 + .../org/bytedeco/pytorch/TagArrayRef.java | 133 + .../org/bytedeco/pytorch/TaggedRange.java | 31 - .../gen/java/org/bytedeco/pytorch/Tanh.java | 33 - .../java/org/bytedeco/pytorch/TanhImpl.java | 4 +- .../bytedeco/pytorch/TanhImplCloneable.java | 14 +- .../pytorch/TanhImplModuleHolder.java | 79 - .../java/org/bytedeco/pytorch/Tanhshrink.java | 33 - .../org/bytedeco/pytorch/TanhshrinkImpl.java | 4 +- .../pytorch/TanhshrinkImplCloneable.java | 14 +- .../pytorch/TanhshrinkImplModuleHolder.java | 79 - .../gen/java/org/bytedeco/pytorch/Tensor.java | 447 +- .../java/org/bytedeco/pytorch/TensorArg.java | 4 +- .../bytedeco/pytorch/TensorArgArrayRef.java | 17 +- .../org/bytedeco/pytorch/TensorArrayRef.java | 23 +- .../pytorch/TensorArrayRefOptional.java | 35 + .../java/org/bytedeco/pytorch/TensorBase.java | 47 +- .../pytorch/TensorBaseMaybeOwned.java | 65 + .../org/bytedeco/pytorch/TensorCastValue.java | 11 +- .../org/bytedeco/pytorch/TensorDataset.java | 41 - .../org/bytedeco/pytorch/TensorDeque.java | 8 +- .../pytorch/TensorElementReference.java | 42 + .../org/bytedeco/pytorch/TensorExample.java | 4 +- .../pytorch/TensorExampleBatchDataset.java | 4 +- .../pytorch/TensorExampleDataset.java | 4 +- .../bytedeco/pytorch/TensorExampleVector.java | 6 +- .../org/bytedeco/pytorch/TensorGeometry.java | 16 +- .../bytedeco/pytorch/TensorGeometryArg.java | 4 +- .../java/org/bytedeco/pytorch/TensorImpl.java | 91 +- .../org/bytedeco/pytorch/TensorImplPtr.java | 150 + .../org/bytedeco/pytorch/TensorImplSet.java | 7 +- .../bytedeco/pytorch/TensorImplVector.java | 6 +- .../org/bytedeco/pytorch/TensorIndex.java | 4 +- .../bytedeco/pytorch/TensorIndexArrayRef.java | 19 +- .../bytedeco/pytorch/TensorIndexVector.java | 6 +- .../org/bytedeco/pytorch/TensorIterator.java | 87 + 
.../bytedeco/pytorch/TensorIteratorBase.java | 252 + .../pytorch/TensorIteratorConfig.java | 166 + .../java/org/bytedeco/pytorch/TensorList.java | 239 + .../bytedeco/pytorch/TensorListIterator.java | 84 + .../bytedeco/pytorch/TensorListOptional.java | 32 - .../org/bytedeco/pytorch/TensorMaker.java | 8 +- .../bytedeco/pytorch/TensorMaybeOwned.java | 4 +- .../java/org/bytedeco/pytorch/TensorName.java | 9 +- .../org/bytedeco/pytorch/TensorNames.java | 4 +- .../org/bytedeco/pytorch/TensorOptional.java | 5 +- .../pytorch/TensorOptionalArrayRef.java | 15 +- .../TensorOptionalElementReference.java | 42 + .../bytedeco/pytorch/TensorOptionalList.java | 239 + .../pytorch/TensorOptionalListIterator.java | 84 + .../pytorch/TensorOptionalVector.java | 6 +- .../org/bytedeco/pytorch/TensorOptions.java | 4 +- .../pytorch/TensorTensorOptional.java | 32 - .../TensorTensorTensorTensorVectorTuple.java | 38 - .../pytorch/TensorTensorTensorTupleTuple.java | 34 - .../TensorTensorVectorTensorVectorTuple.java | 36 - .../pytorch/TensorTensorVectorTuple.java | 34 - .../org/bytedeco/pytorch/TensorTuple.java | 32 - .../java/org/bytedeco/pytorch/TensorType.java | 66 +- .../org/bytedeco/pytorch/TensorVector.java | 10 +- .../pytorch/TensorVectorOptional.java | 13 +- .../pytorch/TensorVectorTensorTuple.java | 34 - ...orVectorTensorVectorTensorVectorTuple.java | 40 - .../java/org/bytedeco/pytorch/TernaryIf.java | 10 +- .../org/bytedeco/pytorch/ThreadIdGuard.java | 4 +- .../pytorch/ThreadLocalDebugInfo.java | 7 +- .../pytorch/ThreadLocalPythonObjects.java | 48 + .../bytedeco/pytorch/ThreadLocalState.java | 4 +- .../pytorch/ThreadLocalStateGuard.java | 4 +- .../pytorch/ThreadLocalStateOptional.java | 5 +- .../java/org/bytedeco/pytorch/Threshold.java | 34 - .../org/bytedeco/pytorch/ThresholdImpl.java | 8 +- .../pytorch/ThresholdImplCloneable.java | 14 +- .../pytorch/ThresholdImplModuleHolder.java | 79 - .../bytedeco/pytorch/ThresholdOptions.java | 4 +- .../gen/java/org/bytedeco/pytorch/Token.java | 4 +- .../java/org/bytedeco/pytorch/TokenTrie.java | 41 - .../pytorch/TorchDispatchModeTLS.java | 46 + .../bytedeco/pytorch/TraceableFunction.java | 4 +- .../org/bytedeco/pytorch/TracingState.java | 60 - .../org/bytedeco/pytorch/Transformer.java | 35 - .../bytedeco/pytorch/TransformerDecoder.java | 35 - .../pytorch/TransformerDecoderImpl.java | 21 +- .../TransformerDecoderImplCloneable.java | 14 +- .../TransformerDecoderImplModuleHolder.java | 79 - .../pytorch/TransformerDecoderLayer.java | 34 - .../pytorch/TransformerDecoderLayerImpl.java | 27 +- .../TransformerDecoderLayerImplCloneable.java | 14 +- ...ansformerDecoderLayerImplModuleHolder.java | 79 - .../TransformerDecoderLayerOptions.java | 6 +- .../pytorch/TransformerDecoderOptions.java | 11 +- .../bytedeco/pytorch/TransformerEncoder.java | 35 - .../pytorch/TransformerEncoderImpl.java | 17 +- .../TransformerEncoderImplCloneable.java | 14 +- .../TransformerEncoderImplModuleHolder.java | 79 - .../pytorch/TransformerEncoderLayer.java | 34 - .../pytorch/TransformerEncoderLayerImpl.java | 20 +- .../TransformerEncoderLayerImplCloneable.java | 14 +- ...ansformerEncoderLayerImplModuleHolder.java | 79 - .../TransformerEncoderLayerOptions.java | 6 +- .../pytorch/TransformerEncoderOptions.java | 11 +- .../org/bytedeco/pytorch/TransformerImpl.java | 18 +- .../pytorch/TransformerImplCloneable.java | 14 +- .../pytorch/TransformerImplModuleHolder.java | 79 - .../bytedeco/pytorch/TransformerOptions.java | 6 +- .../gen/java/org/bytedeco/pytorch/Tree.java | 8 +- 
.../java/org/bytedeco/pytorch/TreeList.java | 51 + .../java/org/bytedeco/pytorch/TreeRef.java | 150 + ...ckler.java => TreeRefSmallVectorBase.java} | 14 +- .../pytorch/TreeRefSmallVectorCommon.java | 49 + .../pytorch/TreeRefSmallVectorImpl.java | 71 + .../bytedeco/pytorch/TreeRefStringMap.java | 49 + .../java/org/bytedeco/pytorch/TreeView.java | 16 +- .../bytedeco/pytorch/TripletMarginLoss.java | 34 - .../pytorch/TripletMarginLossImpl.java | 8 +- .../TripletMarginLossImplCloneable.java | 14 +- .../TripletMarginLossImplModuleHolder.java | 79 - .../pytorch/TripletMarginLossOptions.java | 6 +- .../TripletMarginWithDistanceLoss.java | 36 - .../TripletMarginWithDistanceLossImpl.java | 8 +- ...etMarginWithDistanceLossImplCloneable.java | 14 +- ...arginWithDistanceLossImplModuleHolder.java | 79 - .../TripletMarginWithDistanceLossOptions.java | 6 +- .../gen/java/org/bytedeco/pytorch/Tuple.java | 58 +- .../org/bytedeco/pytorch/TupleElements.java | 108 + .../org/bytedeco/pytorch/TupleLiteral.java | 14 +- .../java/org/bytedeco/pytorch/TuplePtr.java | 150 + .../java/org/bytedeco/pytorch/TupleType.java | 4 +- .../gen/java/org/bytedeco/pytorch/Type.java | 18 +- .../org/bytedeco/pytorch/TypeArrayRef.java | 17 +- .../java/org/bytedeco/pytorch/TypeEnv.java | 4 +- .../java/org/bytedeco/pytorch/TypeError.java | 4 +- .../org/bytedeco/pytorch/TypeIdentifier.java | 12 +- .../pytorch/TypeIdentifierIdWrapper.java | 41 - .../java/org/bytedeco/pytorch/TypeMeta.java | 7 +- .../org/bytedeco/pytorch/TypeMetaData.java | 4 +- .../bytedeco/pytorch/TypeMetaOptional.java | 5 +- .../org/bytedeco/pytorch/TypeNameUniquer.java | 47 - .../java/org/bytedeco/pytorch/TypeParser.java | 32 - .../org/bytedeco/pytorch/TypePtrOptional.java | 5 +- .../java/org/bytedeco/pytorch/TypeVector.java | 6 +- .../java/org/bytedeco/pytorch/UnaryOp.java | 10 +- .../bytedeco/pytorch/UndefinedTensorImpl.java | 4 +- .../java/org/bytedeco/pytorch/Unflatten.java | 34 - .../org/bytedeco/pytorch/UnflattenImpl.java | 12 +- .../pytorch/UnflattenImplCloneable.java | 14 +- .../pytorch/UnflattenImplModuleHolder.java | 79 - .../bytedeco/pytorch/UnflattenOptions.java | 4 +- .../gen/java/org/bytedeco/pytorch/Unfold.java | 34 - .../java/org/bytedeco/pytorch/UnfoldImpl.java | 8 +- .../bytedeco/pytorch/UnfoldImplCloneable.java | 14 +- .../pytorch/UnfoldImplModuleHolder.java | 79 - .../org/bytedeco/pytorch/UnfoldOptions.java | 4 +- .../java/org/bytedeco/pytorch/UnionType.java | 4 +- .../org/bytedeco/pytorch/UniqueVoidPtr.java | 84 + .../java/org/bytedeco/pytorch/Unpickler.java | 4 +- .../java/org/bytedeco/pytorch/Upsample.java | 34 - .../org/bytedeco/pytorch/UpsampleImpl.java | 8 +- .../pytorch/UpsampleImplCloneable.java | 14 +- .../pytorch/UpsampleImplModuleHolder.java | 79 - ...upsample_mode_t.java => UpsampleMode.java} | 42 +- .../org/bytedeco/pytorch/UpsampleOptions.java | 6 +- .../gen/java/org/bytedeco/pytorch/Use.java | 4 +- .../java/org/bytedeco/pytorch/V_JitNode.java | 26 - .../gen/java/org/bytedeco/pytorch/Value.java | 4 +- .../org/bytedeco/pytorch/ValueArrayRef.java | 37 +- .../java/org/bytedeco/pytorch/ValueError.java | 4 +- .../org/bytedeco/pytorch/ValueOptional.java | 5 +- .../org/bytedeco/pytorch/ValueValueMap.java | 4 +- .../org/bytedeco/pytorch/ValueVector.java | 6 +- .../java/org/bytedeco/pytorch/ValueWrap.java | 4 +- .../gen/java/org/bytedeco/pytorch/Var.java | 10 +- .../java/org/bytedeco/pytorch/VarMaybe.java | 10 +- .../java/org/bytedeco/pytorch/VarType.java | 32 - .../org/bytedeco/pytorch/VariableInfo.java | 4 +- 
.../org/bytedeco/pytorch/VariableVersion.java | 6 +- .../java/org/bytedeco/pytorch/WarnAlways.java | 4 +- .../java/org/bytedeco/pytorch/Warning.java | 18 +- .../org/bytedeco/pytorch/WarningHandler.java | 4 +- .../bytedeco/pytorch/WarningHandlerGuard.java | 4 +- .../java/org/bytedeco/pytorch/WeakIValue.java | 4 +- .../pytorch/WeakOrStrongCompilationUnit.java | 4 +- .../bytedeco/pytorch/WeakOrStrongTypePtr.java | 4 +- .../org/bytedeco/pytorch/WeakStorage.java | 104 + .../bytedeco/pytorch/WeakStorageVector.java | 47 + .../pytorch/WeakStorageVectorOptional.java | 35 + .../org/bytedeco/pytorch/WeakTypePtr.java | 4 +- .../gen/java/org/bytedeco/pytorch/While.java | 15 +- .../gen/java/org/bytedeco/pytorch/With.java | 19 +- .../bytedeco/pytorch/WithCurrentScope.java | 32 - .../org/bytedeco/pytorch/WithInsertPoint.java | 35 - .../java/org/bytedeco/pytorch/WithItem.java | 10 +- .../org/bytedeco/pytorch/WithItemList.java | 38 + .../pytorch/WithItemListIterator.java | 35 + .../pytorch/WithNestedTracingFrame.java | 36 - .../org/bytedeco/pytorch/WorkerException.java | 37 - .../bytedeco/pytorch/WriteableTensorData.java | 4 +- .../java/org/bytedeco/pytorch/ZeroPad2d.java | 34 - .../org/bytedeco/pytorch/ZeroPad2dImpl.java | 8 +- .../pytorch/ZeroPad2dImplCloneable.java | 14 +- .../pytorch/ZeroPad2dImplModuleHolder.java | 79 - .../bytedeco/pytorch/ZeroPad2dOptions.java | 4 +- .../pytorch/_CopyBytesFunctionRegisterer.java | 61 - .../bytedeco/pytorch/_compute_enum_name.java | 77 - .../org/bytedeco/pytorch/_str_wrapper.java | 41 - .../gen/java/org/bytedeco/pytorch/all_of.java | 25 - .../bytedeco/pytorch/attribute_iterator.java | 9 +- .../org/bytedeco/pytorch/attribute_list.java | 4 +- .../gen/java/org/bytedeco/pytorch/bitset.java | 70 + .../org/bytedeco/pytorch/buffer_iterator.java | 9 +- .../org/bytedeco/pytorch/buffer_list.java | 4 +- .../{PythonPrintImpl.java => class_.java} | 12 +- .../{ObserverContext.java => crc64_t.java} | 17 +- .../pytorch/cuda/ActivationDescriptor.java | 43 + .../pytorch/cuda/CTCLossDescriptor.java | 49 + .../org/bytedeco/pytorch/cuda/CUDAGuard.java | 76 + .../pytorch/cuda/CUDAKernelLaunchInfo.java | 62 + .../cuda/CUDAKernelLaunchInfoVector.java | 93 + .../cuda/CUDAKernelLaunchRegistry.java | 81 + .../pytorch/cuda/CUDAMultiStreamGuard.java | 43 + .../org/bytedeco/pytorch/cuda/CUDAStream.java | 112 + .../pytorch/cuda/CUDAStreamArrayRef.java | 147 + .../cuda/CUDAStreamCaptureModeGuard.java | 35 + .../pytorch/cuda/CUDAStreamGuard.java | 79 + .../pytorch/cuda/CUDAStreamOptional.java | 38 + .../org/bytedeco/pytorch/cuda/Constant.java | 34 + .../pytorch/cuda/ConvolutionDescriptor.java | 45 + .../org/bytedeco/pytorch/cuda/CuDNNError.java | 30 + .../pytorch/cuda/DeviceAssertionData.java | 63 + .../pytorch/cuda/DeviceAssertionsData.java | 50 + .../cuda/DeviceAssertionsDataVector.java | 93 + ...aVectorCUDAKernelLaunchInfoVectorPair.java | 43 + .../pytorch/cuda/DropoutDescriptor.java | 53 + .../pytorch/cuda/FilterDescriptor.java | 49 + .../pytorch/cuda/OptionalCUDAGuard.java | 90 + .../pytorch/cuda/OptionalCUDAStreamGuard.java | 86 + .../bytedeco/pytorch/cuda/RNNDescriptor.java | 46 + .../cuda/SpatialTransformerDescriptor.java | 45 + .../pytorch/cuda/TensorDescriptor.java | 77 + .../pytorch/fibonacci_hash_policy.java | 47 + .../org/bytedeco/pytorch/getTypePtr_.java | 38 - .../org/bytedeco/pytorch/global/torch.java | 87404 +++++++--------- .../bytedeco/pytorch/global/torch_cuda.java | 777 + .../org/bytedeco/pytorch/graph_node_list.java | 4 +- .../pytorch/graph_node_list_iterator.java | 4 +- 
.../ivalue_to_const_ref_overload_return.java | 40 - .../gen/java/org/bytedeco/pytorch/kArea.java | 4 +- .../java/org/bytedeco/pytorch/kBatchMean.java | 4 +- .../java/org/bytedeco/pytorch/kBicubic.java | 4 +- .../java/org/bytedeco/pytorch/kBilinear.java | 4 +- .../java/org/bytedeco/pytorch/kBorder.java | 4 +- .../java/org/bytedeco/pytorch/kCircular.java | 4 +- .../java/org/bytedeco/pytorch/kConstant.java | 4 +- .../java/org/bytedeco/pytorch/kConv1D.java | 4 +- .../java/org/bytedeco/pytorch/kConv2D.java | 4 +- .../java/org/bytedeco/pytorch/kConv3D.java | 4 +- .../bytedeco/pytorch/kConvTranspose1D.java | 4 +- .../bytedeco/pytorch/kConvTranspose2D.java | 4 +- .../bytedeco/pytorch/kConvTranspose3D.java | 4 +- .../gen/java/org/bytedeco/pytorch/kFanIn.java | 4 +- .../java/org/bytedeco/pytorch/kFanOut.java | 4 +- .../gen/java/org/bytedeco/pytorch/kGELU.java | 4 +- .../gen/java/org/bytedeco/pytorch/kGRU.java | 4 +- .../gen/java/org/bytedeco/pytorch/kLSTM.java | 4 +- .../java/org/bytedeco/pytorch/kLeakyReLU.java | 4 +- .../java/org/bytedeco/pytorch/kLinear.java | 4 +- .../gen/java/org/bytedeco/pytorch/kMax.java | 4 +- .../gen/java/org/bytedeco/pytorch/kMean.java | 4 +- .../gen/java/org/bytedeco/pytorch/kMish.java | 4 +- .../java/org/bytedeco/pytorch/kNearest.java | 4 +- .../org/bytedeco/pytorch/kNearestExact.java | 4 +- .../gen/java/org/bytedeco/pytorch/kNone.java | 4 +- .../java/org/bytedeco/pytorch/kRNN_RELU.java | 4 +- .../java/org/bytedeco/pytorch/kRNN_TANH.java | 4 +- .../gen/java/org/bytedeco/pytorch/kReLU.java | 4 +- .../java/org/bytedeco/pytorch/kReflect.java | 4 +- .../org/bytedeco/pytorch/kReflection.java | 4 +- .../java/org/bytedeco/pytorch/kReplicate.java | 4 +- .../gen/java/org/bytedeco/pytorch/kSame.java | 4 +- .../gen/java/org/bytedeco/pytorch/kSiLU.java | 4 +- .../java/org/bytedeco/pytorch/kSigmoid.java | 4 +- .../gen/java/org/bytedeco/pytorch/kSum.java | 4 +- .../gen/java/org/bytedeco/pytorch/kTanh.java | 4 +- .../java/org/bytedeco/pytorch/kTrilinear.java | 4 +- .../gen/java/org/bytedeco/pytorch/kValid.java | 4 +- .../gen/java/org/bytedeco/pytorch/kZeros.java | 4 +- .../org/bytedeco/pytorch/module_iterator.java | 9 +- .../org/bytedeco/pytorch/module_list.java | 4 +- .../bytedeco/pytorch/mt19937_data_pod.java | 121 + .../org/bytedeco/pytorch/mt19937_engine.java | 43 + .../{Object.java => mz_zip_archive.java} | 12 +- .../pytorch/named_attribute_iterator.java | 9 +- .../pytorch/named_attribute_list.java | 4 +- .../pytorch/named_buffer_iterator.java | 9 +- .../bytedeco/pytorch/named_buffer_list.java | 4 +- .../pytorch/named_module_iterator.java | 9 +- .../bytedeco/pytorch/named_module_list.java | 4 +- .../pytorch/named_parameter_iterator.java | 9 +- .../pytorch/named_parameter_list.java | 4 +- .../gen/java/org/bytedeco/pytorch/pack.java | 4 +- .../bytedeco/pytorch/parameter_iterator.java | 9 +- .../org/bytedeco/pytorch/parameter_list.java | 4 +- .../pytorch/power_of_two_hash_policy.java | 46 + .../org/bytedeco/pytorch/pretty_tree.java | 22 +- .../pytorch/prime_number_hash_policy.java | 244 + .../bytedeco/pytorch/propagation_error.java | 25 - .../gen/java/org/bytedeco/pytorch/qint32.java | 4 +- .../gen/java/org/bytedeco/pytorch/qint8.java | 4 +- .../java/org/bytedeco/pytorch/quint2x4.java | 4 +- .../java/org/bytedeco/pytorch/quint4x2.java | 4 +- .../gen/java/org/bytedeco/pytorch/quint8.java | 4 +- .../pytorch/static_cast_with_inter_type.java | 47 - .../pytorch/transformer_activation_t.java | 39 - .../java/org/bytedeco/pytorch/type_index.java | 38 + 
.../org/bytedeco/pytorch/AbstractTensor.java | 5 +- .../pytorch/TransformerActivation.java | 63 + .../pytorch/functions/ArchiveWriter.java | 30 + .../bytedeco/pytorch/functions/DDPLogger.java | 31 + .../pytorch/functions/DistanceFunction.java | 31 + .../org/bytedeco/pytorch/functions/Func.java | 28 + .../functions/GraphFunctionCreator.java | 30 + .../pytorch/functions/IValueSupplier.java | 30 + .../functions/IValueVectorConsumer.java | 30 + .../functions/JitModuleApplyFunction.java | 30 + .../pytorch/functions/LossClosure.java | 29 + .../functions/ModuleApplyFunction.java | 30 + .../functions/NamedModuleApplyFunction.java | 29 + .../NamedSharedModuleApplyFunction.java | 29 + .../pytorch/functions/PickleWriter.java | 31 + .../pytorch/functions/PointerConsumer.java | 28 + .../bytedeco/pytorch/functions/Reader.java | 29 + .../functions/SharedModuleApplyFunction.java | 32 + .../pytorch/functions/SizeTSupplier.java | 29 + .../pytorch/functions/StringConsumer.java | 30 + .../pytorch/functions/StringSupplier.java | 29 + .../pytorch/functions/TensorIdGetter.java | 31 + .../pytorch/functions/TensorMapper.java | 33 + .../pytorch/functions/TensorTensorHook.java | 31 + .../pytorch/functions/TypeMapper.java | 29 + .../pytorch/functions/TypeRenamer.java | 31 + .../pytorch/functions/ValueMapper.java | 29 + .../pytorch/functions/VoidTensorHook.java | 31 + .../org/bytedeco/pytorch/presets/torch.java | 4958 +- .../bytedeco/pytorch/presets/torch_cuda.java | 153 + pytorch/src/main/java9/module-info.java | 2 + .../pytorch/presets/torch_cuda_include.h | 26 + .../bytedeco/pytorch/presets/torch_include.h | 1415 + 1775 files changed, 67810 insertions(+), 76066 deletions(-) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropout.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AnnotatedSchema.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java create mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentVector.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BCELoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Bilinear.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BlockVector.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BytecodeEmitMode.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/BytecodeEmitModeGuard.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CELU.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java delete mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/CTCLoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{_object.java => CUevent_st.java} (69%) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Capsule.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentInfo.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentInfoPOD.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentSpec.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ComplexHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Conv1d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplModuleHolder.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{conv_padding_t1.java => Conv1dPadding.java} (62%) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Conv2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplModuleHolder.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{conv_padding_t2.java => Conv2dPadding.java} (62%) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Conv3d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplModuleHolder.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{conv_padding_t3.java => Conv3dPadding.java} (62%) rename pytorch/src/gen/java/org/bytedeco/pytorch/{conv_padding_mode_t.java => ConvPaddingMode.java} (52%) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarity.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CppFunction.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplModuleHolder.java delete mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplModuleHolder.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{warn_fn_type.java => DeleterFnPtr.java} (66%) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuard.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplInterface.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplRegistrar.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DeviceHash.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeHash.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DictKeyEqualTo.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DictKeyHash.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DisablePythonDispatcher.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DisabledStr.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DispatchTraceNestingGuard.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{any_of.java => DontIncreaseRefcount.java} (56%) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplex.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{DoubleComplexrrayRef.java => DoubleComplexArrayRef.java} (59%) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexElementReference.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexList.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexListIterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DoubleElementReference.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DoubleList.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DoubleListIterator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Dropout.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ELU.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Embedding.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBag.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/EnabledStr.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolderPtr.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/EqualType.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ExprList.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ExprListIterator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropout.java 
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FileLineFunc.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Flatten.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplex.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{FloatComplexrrayRef.java => FloatComplexArrayRef.java} (60%) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FloatOptional.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Fold.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplModuleHolder.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{NodeGuard.java => ForceDispatchKeyGuard.java} (51%) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaVector.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtr.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrArrayRef.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrElementReference.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrList.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrListIterator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GELU.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GLU.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GRU.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GRUCell.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImplPtr.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GenericElementReference.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GenericListIterator.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{grid_sample_mode_t.java => GridSampleMode.java} (56%) rename pytorch/src/gen/java/org/bytedeco/pytorch/{grid_sample_padding_mode_t.java => GridSamplePaddingMode.java} (52%) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GroupNorm.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksArgs.java create mode 
100644 pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/HalfComplex.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Hardshrink.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Hardtanh.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/HashType.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/HermeticPyObjectTLS.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/HuberLoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/IRAttributeError.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/IdentList.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/IdentListIterator.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Identity.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/IncludeDispatchKeyGuard.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Indices.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InputMetadata.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/IntSizedSmallVectorBase.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{interpolate_mode_t.java => InterpolateMode.java} (50%) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterContinuation.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterState.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/IterableTree.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplModuleHolder.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{kldiv_loss_reduction_t.java => KLDivLossReduction.java} (51%) delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/L1Loss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplModuleHolder.java delete mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/LSTM.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCell.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LayerNorm.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLU.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Library.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Linear.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ListElementConstReferenceTraits.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ListImpl.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LocalDispatchKeySet.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNorm.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoid.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmax.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplModuleHolder.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LongElementReference.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LongListIterator.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{SugaredValueArrayRef.java => LongOptionalArrayRef.java} (55%) rename pytorch/src/gen/java/org/bytedeco/pytorch/{SymSmallVectorBase.java => LongSmallVectorBase.java} (56%) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorCommon.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{DimVectorImpl.java => LongSmallVectorImpl.java} (73%) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java rename pytorch/src/gen/java/org/bytedeco/pytorch/{loss_reduction_t.java => LossReduction.java} (55%) rename pytorch/src/gen/java/org/bytedeco/pytorch/{_Uninitialized.java => MPSHooksArgs.java} (61%) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MSELoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MakeIndices.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLoss.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2d.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplModuleHolder.java delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3d.java delete mode 100644 
pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplModuleHolder.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsGenericImplTensor.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{MaybeOwnedTraits.java => MaybeOwnedTraitsTensor.java} (74%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Mish.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MishImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDict.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ModuleList.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLoss.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLoss.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLoss.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttention.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NLLLoss.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NamedAnyModule.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NamedBufferPolicy.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{NamedAttributePolicy.java => NamedIValuePolicy.java} (63%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{NamedModulePolicy.java => NamedJitModulePolicy.java} (66%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{NamedParameterPolicy.java => NamedTensorPolicy.java} (63%)
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NoTracerDispatchMode.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NoWarn.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{IntFunctionPreHookMap.java => NodeIntMap.java} (64%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TokenTrieVector.java => NodeSet.java} (61%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{Suspend.java => NodeSmallVectorBase.java} (53%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorCommon.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorImpl.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Nonlinearity.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/NonlinearityType.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksArgs.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksInterface.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/OpTableOffsetAndMask.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/OpaqueOptionalTensorRef.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameView.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/OwnedSourceRange.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PODLocalDispatchKeySet.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PReLU.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplModuleHolder.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{pad_mode_t.java => PaddingMode.java} (54%)
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistance.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplModuleHolder.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ParamList.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ParamListIterator.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDict.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ParameterList.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffle.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffle.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLoss.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PrintDepsTable.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ProfilerConfig.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PropertyList.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListIterator.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListMaybe.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PropertyPropBase.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreter.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolderPtr.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PythonDispatcherTLS.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PythonPrint.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/PythonTorchFunctionTLS.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{DeprecatedTypeProperties.java => QTensorImpl.java} (70%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerPtr.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RNN.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{rnn_options_base_mode_t.java => RNNBaseMode.java} (52%)
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RNNCell.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplModuleHolder.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{rnn_nonlinearity_t.java => RNNNonlinearity.java} (56%)
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RReLU.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplModuleHolder.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterfacePtr.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReLU.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplModuleHolder.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReadyQueue.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntList.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntPair.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplModuleHolder.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{RecursiveMethodCallError.java => Result.java} (60%)
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SELU.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplModuleHolder.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SafePyHandle.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ScriptModuleSerializer.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ScriptTypeParser.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Sequential.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplModuleHolder.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{ClassTypeVector.java => SharedClassTypeVector.java} (64%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{FunctionPreVector.java => SharedFunctionPreVector.java} (62%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{SugaredValueVector.java => SharedSugaredValueVector.java} (63%)
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SiLU.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Sigmoid.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplModuleHolder.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SizeTMatchedSchemaPair.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SmallNodeVector.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1Loss.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLoss.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Softmax.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Softmin.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Softplus.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Softshrink.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Softsign.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeDeserializer.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeSerializer.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SplitUntil32Bit.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector_V.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StmtList.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StmtListIterator.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StorageImplPtr.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3Holder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StreamHash.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StreamSet.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItemVector.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{StringAnyModulePairVector.java => StringAnyModuleVector.java} (54%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItemVector.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{StringModulePairVector.java => StringModuleVector.java} (55%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItemVector.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{StringSharedModulePairVector.java => StringSharedModuleVector.java} (52%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItemVector.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{StringTensorPairVector.java => StringTensorVector.java} (51%)
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StrongFunctionPtr.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{SymIntRef.java => SymIntArrayRef.java} (70%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{SmallVectorBase.java => SymIntSmallVectorBase.java} (72%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorCommon.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{SymDimVectorImpl.java => SymIntSmallVectorImpl.java} (62%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{SymNodeRef.java => SymNodeArrayRef.java} (56%)
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SymbolHash.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_DataPtrSizeT_T.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_DoubleLong_T.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_IntInt_T.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_LongLong_T.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{PackedSequenceTensorTensorTupleTuple.java => T_PackedSequenceT_TensorTensor_T_T.java} (58%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{PackedSequenceTensorTuple.java => T_PackedSequenceTensor_T.java} (65%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_StringLong_T.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{StringSizeTSizeTTuple.java => T_StringSizeTSizeT_T.java} (61%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{StringSizeTSizeTTupleOptional.java => T_StringSizeTSizeT_TOptional.java} (51%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple.java => T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java} (60%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorMaybeOwnedTensorMaybeOwnedTuple.java => T_TensorMaybeOwnedTensorMaybeOwned_T.java} (62%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorT_TensorTensor_T_T.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorTensorDoubleLongTuple.java => T_TensorTensorDoubleLong_T.java} (54%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorTensorLongLongTensorTuple.java => T_TensorTensorLongLongTensor_T.java} (54%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorTensorTensorTensorLongTuple.java => T_TensorTensorTensorTensorLong_T.java} (53%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorTensorTensorTensorTensorTensorTensorTuple.java => T_TensorTensorTensorTensorTensorTensorTensor_T.java} (50%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorTensorTensorTensorTensorTensorTuple.java => T_TensorTensorTensorTensorTensorTensor_T.java} (51%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorTensorTensorTensorTensorTuple.java => T_TensorTensorTensorTensorTensor_T.java} (52%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorTensorTensorTensorTuple.java => T_TensorTensorTensorTensor_T.java} (54%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorsLongLongLongLongTensor_T.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorTensorTensorTuple.java => T_TensorTensorTensor_T.java} (55%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{TensorTensorTuple.java => T_TensorTensor_T.java} (56%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_TOptional.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensor_T.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_T.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_TOptional.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TagArrayRef.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TaggedRange.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Tanh.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Tanhshrink.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplModuleHolder.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRefOptional.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorElementReference.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplPtr.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorListIterator.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorListOptional.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalElementReference.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalList.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalListIterator.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorOptional.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorVectorTuple.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTupleTuple.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorVectorTensorVectorTuple.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorVectorTuple.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorTuple.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorTensorTuple.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalPythonObjects.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Threshold.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TokenTrie.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TorchDispatchModeTLS.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TracingState.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Transformer.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayer.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayer.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplModuleHolder.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TreeList.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TreeRef.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{SourceRangePickler.java => TreeRefSmallVectorBase.java} (52%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorCommon.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorImpl.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefStringMap.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLoss.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLoss.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplModuleHolder.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TupleElements.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TuplePtr.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifierIdWrapper.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TypeNameUniquer.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/TypeParser.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Unflatten.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Unfold.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplModuleHolder.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/Upsample.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplModuleHolder.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{upsample_mode_t.java => UpsampleMode.java} (53%)
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/V_JitNode.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/VarType.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/WithCurrentScope.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/WithInsertPoint.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/WithItemList.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/WithItemListIterator.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/WithNestedTracingFrame.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/WorkerException.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2d.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplModuleHolder.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/_CopyBytesFunctionRegisterer.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/_compute_enum_name.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/_str_wrapper.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/all_of.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/bitset.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{PythonPrintImpl.java => class_.java} (61%)
rename pytorch/src/gen/java/org/bytedeco/pytorch/{ObserverContext.java => crc64_t.java} (50%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamOptional.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAGuard.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAStreamGuard.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/fibonacci_hash_policy.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/getTypePtr_.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/ivalue_to_const_ref_overload_return.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_data_pod.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_engine.java
rename pytorch/src/gen/java/org/bytedeco/pytorch/{Object.java => mz_zip_archive.java} (61%)
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/power_of_two_hash_policy.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/prime_number_hash_policy.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/propagation_error.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/static_cast_with_inter_type.java
delete mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/transformer_activation_t.java
create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/type_index.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/TransformerActivation.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/ArchiveWriter.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/DDPLogger.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/DistanceFunction.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/Func.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/GraphFunctionCreator.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/IValueSupplier.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/IValueVectorConsumer.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/JitModuleApplyFunction.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/LossClosure.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/ModuleApplyFunction.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/NamedModuleApplyFunction.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/NamedSharedModuleApplyFunction.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/PickleWriter.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/PointerConsumer.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/Reader.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/SharedModuleApplyFunction.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/SizeTSupplier.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/StringConsumer.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/StringSupplier.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorIdGetter.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorMapper.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorTensorHook.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeMapper.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeRenamer.java
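[Editorial note on the summary above: the dominant pattern in this refactor is that every generated Xxx holder class and XxxImplModuleHolder wrapper is deleted, leaving the XxxImpl classes as the direct user-facing modules, alongside a new org.bytedeco.pytorch.functions package and new torch_cuda presets. A minimal Java sketch of the resulting usage style, not taken from the patch itself: the constructor signature comes from the AdaptiveAvgPool1dImpl hunk further down, while the input tensor x and the forward(Tensor) call are assumptions carried over from earlier releases.]

// Hedged sketch (assumes a Tensor x created elsewhere; forward(Tensor) assumed unchanged):
LongPointer outputSize = new LongPointer(1).put(0, 5);       // maps torch::ExpandingArray<1>
AdaptiveAvgPool1dImpl pool = new AdaptiveAvgPool1dImpl(outputSize);
Tensor y = pool.forward(x);                                  // no ModuleHolder wrapper anymore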
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/ValueMapper.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/functions/VoidTensorHook.java
create mode 100644 pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java
create mode 100644 pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h
create mode 100644 pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 277fc79de30..2f3542914d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
 
+ * Refactor and improve presets for PyTorch ([pull #1360](https://github.com/bytedeco/javacpp-presets/pull/1360))
  * Include `mkl_lapack.h` header file in presets for MKL ([issue #1388](https://github.com/bytedeco/javacpp-presets/issues/1388))
  * Map new higher-level C++ API of Triton Inference Server ([pull #1361](https://github.com/bytedeco/javacpp-presets/pull/1361))
  * Upgrade presets for OpenCV 4.8.0, DNNL 3.1.1, CPython 3.11.4, NumPy 1.25.1, SciPy 1.11.1, LLVM 16.0.6, TensorFlow Lite 2.13.0, Triton Inference Server 2.34.0, ONNX Runtime 1.15.1, and their dependencies
diff --git a/pytorch/README.md b/pytorch/README.md
index 0678c4294af..2cbd8c8156f 100644
--- a/pytorch/README.md
+++ b/pytorch/README.md
@@ -40,7 +40,7 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
     <modelVersion>4.0.0</modelVersion>
     <groupId>org.bytedeco.pytorch</groupId>
     <artifactId>simplemnist</artifactId>
-    <version>1.5.9</version>
+    <version>1.5.10-SNAPSHOT</version>
     <name>SimpleMNIST</name>
 
@@ -48,28 +48,28 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
         <dependency>
             <groupId>org.bytedeco</groupId>
             <artifactId>pytorch-platform</artifactId>
-            <version>2.0.1-1.5.9</version>
+            <version>2.0.1-1.5.10-SNAPSHOT</version>
         </dependency>
 
         <dependency>
             <groupId>org.bytedeco</groupId>
             <artifactId>pytorch-platform-gpu</artifactId>
-            <version>2.0.1-1.5.9</version>
+            <version>2.0.1-1.5.10-SNAPSHOT</version>
         </dependency>
 
         <dependency>
             <groupId>org.bytedeco</groupId>
             <artifactId>cuda-platform-redist</artifactId>
-            <version>12.1-8.9-1.5.9</version>
+            <version>12.1-8.9-1.5.10-SNAPSHOT</version>
         </dependency>
 
         <dependency>
             <groupId>org.bytedeco</groupId>
             <artifactId>mkl-platform-redist</artifactId>
-            <version>2023.1-1.5.9</version>
+            <version>2023.1-1.5.10-SNAPSHOT</version>
         </dependency>
 
     </dependencies>
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java
index 134816a3351..6c22b2d5693 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java
new file mode 100644
index 00000000000..d85fe88b885
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java
@@ -0,0 +1,26 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Namespace("torch::profiler::impl::kineto") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ActivityTraceWrapper extends Pointer {
+    /** Empty constructor. Calls {@code super((Pointer)null)}. */
+    public ActivityTraceWrapper() { super((Pointer)null); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public ActivityTraceWrapper(Pointer p) { super(p); }
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java
new file mode 100644
index 00000000000..195fb52485c
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java
@@ -0,0 +1,46 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("std::set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ActivityTypeSet extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public ActivityTypeSet(Pointer p) { super(p); }
+    public ActivityTypeSet()       { allocate();  }
+    private native void allocate();
+    public native @Name("operator =") @ByRef ActivityTypeSet put(@ByRef ActivityTypeSet x);
+
+    public boolean empty() { return size() == 0; }
+    public native long size();
+
+    public ActivityType front() { try (Iterator it = begin()) { return it.get(); } }
+    public native void insert(@ByRef ActivityType value);
+    public native void erase(@ByRef ActivityType value);
+    public native @ByVal Iterator begin();
+    public native @ByVal Iterator end();
+    @NoOffset @Name("iterator") public static class Iterator extends Pointer {
+        public Iterator(Pointer p) { super(p); }
+        public Iterator() { }
+
+        public native @Name("operator ++") @ByRef Iterator increment();
+        public native @Name("operator ==") boolean equals(@ByRef Iterator it);
+        public native @Name("operator *") @ByRef @Const ActivityType get();
+    }
+}
+
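[Editorial note: ActivityTypeSet above is JavaCPP's mapping of a std::set of profiler activity types, with begin()/end() wrapping the C++ iterators directly. A short usage sketch; someActivityType stands in for a value obtained elsewhere and is hypothetical.]

// Hedged sketch: iterating the generated set binding, using only the methods declared above.
ActivityTypeSet activities = new ActivityTypeSet();
activities.insert(someActivityType);                        // hypothetical ActivityType value
for (ActivityTypeSet.Iterator it = activities.begin();
        !it.equals(activities.end()); it.increment()) {     // operator== / operator++ bindings
    ActivityType a = it.get();                              // operator* binding
}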
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java
index 6e2b56c5361..942ecb83938 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -33,10 +35,10 @@ public Adagrad(
   private native void allocate(
       @ByVal OptimizerParamGroupVector param_groups);
-  public Adagrad(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults) { super((Pointer)null); allocate(params, defaults); }
-  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults);
-  public Adagrad(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
-  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params);
+  public Adagrad(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults) { super((Pointer)null); allocate(params, defaults); }
+  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults);
+  public Adagrad(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
+  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params);
 
   public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure);
   public native @ByVal Tensor step();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
index 50355fc2893..9efba03c4c7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -33,7 +35,10 @@ public class AdagradOptions extends OptimizerCloneableAdagradOptions {
   public native @ByRef @NoException(true) DoublePointer eps();
 
-
+  private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+          @Const @ByRef AdagradOptions lhs,
+          @Const @ByRef AdagradOptions rhs);
+  public boolean equals(AdagradOptions rhs) { return equals(this, rhs); }
   public native double get_lr();
   public native void set_lr(double lr);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
index f3b826c2882..841388e06d3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -38,5 +40,8 @@ public class AdagradParamState extends OptimizerCloneableAdagradParamState {
   public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer step();
 
-
+  private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+          @Const @ByRef AdagradParamState lhs,
+          @Const @ByRef AdagradParamState rhs);
+  public boolean equals(AdagradParamState rhs) { return equals(this, rhs); }
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java
index d13e656de9d..ae6724e295b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -32,10 +34,10 @@ public Adam(
       @ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
   private native void allocate(
       @ByVal OptimizerParamGroupVector param_groups);
-  public Adam(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults) { super((Pointer)null); allocate(params, defaults); }
-  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults);
-  public Adam(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
-  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params);
+  public Adam(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults) { super((Pointer)null); allocate(params, defaults); }
+  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults);
+  public Adam(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
+  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params);
 
   public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure);
   public native @ByVal Tensor step();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
index 5d11f0db04f..e455c319d65 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -33,7 +35,10 @@ public class AdamOptions extends OptimizerCloneableAdamOptions {
   public native @Cast("bool*") @ByRef @NoException(true) BoolPointer amsgrad();
 
-
+  private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+          @Const @ByRef AdamOptions lhs,
+          @Const @ByRef AdamOptions rhs);
+  public boolean equals(AdamOptions rhs) { return equals(this, rhs); }
   public native double get_lr();
   public native void set_lr(double lr);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
index 80b380757d3..93ab614cab7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -40,5 +42,8 @@ public class AdamParamState extends OptimizerCloneableAdamParamState {
   public native @ByRef @NoException(true) Tensor max_exp_avg_sq();
 
-
+  private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+          @Const @ByRef AdamParamState lhs,
+          @Const @ByRef AdamParamState rhs);
+  public boolean equals(AdamParamState rhs) { return equals(this, rhs); }
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java
index b8f8cb24c1e..255df673b32 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -32,10 +34,10 @@ public AdamW(
       @ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
   private native void allocate(
       @ByVal OptimizerParamGroupVector param_groups);
-  public AdamW(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults) { super((Pointer)null); allocate(params, defaults); }
-  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults);
-  public AdamW(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
-  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params);
+  public AdamW(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults) { super((Pointer)null); allocate(params, defaults); }
+  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults);
+  public AdamW(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
+  private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params);
 
   public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure);
   public native @ByVal Tensor step();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
index fb2831101a0..5607a72a48f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -33,7 +35,10 @@ public class AdamWOptions extends OptimizerCloneableAdamWOptions {
   public native @Cast("bool*") @ByRef @NoException(true) BoolPointer amsgrad();
 
-
+  private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+          @Const @ByRef AdamWOptions lhs,
+          @Const @ByRef AdamWOptions rhs);
+  public boolean equals(AdamWOptions rhs) { return equals(this, rhs); }
   public native double get_lr();
   public native void set_lr(double lr);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
index ac74d359bdd..1cfa5060df3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -40,5 +42,8 @@ public class AdamWParamState extends OptimizerCloneableAdamWParamState {
   public native @ByRef @NoException(true) Tensor max_exp_avg_sq();
 
-
+  private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+          @Const @ByRef AdamWParamState lhs,
+          @Const @ByRef AdamWParamState rhs);
+  public boolean equals(AdamWParamState rhs) { return equals(this, rhs); }
 }
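[Editorial note: the hunks above give every optimizer options and param-state class a Java-visible equals() backed by the native operator==, next to the existing get_lr()/set_lr() accessors. A small sketch of what this enables; the no-argument AdamOptions constructor is an assumption based on earlier releases, as only set_lr and equals appear in the diffs.]

// Hedged sketch: comparing optimizer configurations through the new operator== binding.
AdamOptions a = new AdamOptions();   // assumed default constructor
AdamOptions b = new AdamOptions();
b.set_lr(5e-4);                      // accessor bound above
boolean same = a.equals(b);          // dispatches to the native operator==; false once the learning rates differ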
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1d.java
deleted file mode 100644
index 72e05bdf133..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1d.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-/** A {@code ModuleHolder} subclass for {@code AdaptiveAvgPool1dImpl}.
- *  See the documentation for {@code AdaptiveAvgPool1dImpl} class to learn what
- *  methods it provides, and examples of how to use {@code AdaptiveAvgPool1d} with
- *  {@code torch::nn::AdaptiveAvgPool1dOptions}. See the documentation for
- *  {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
-@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class AdaptiveAvgPool1d extends AdaptiveAvgPool1dImplModuleHolder {
-    static { Loader.load(); }
-
-    public AdaptiveAvgPool1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-    private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AdaptiveAvgPool1d(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl module) { super((Pointer)null); allocate(module); }
-    private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl module);
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public AdaptiveAvgPool1d(Pointer p) { super(p); }
-
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java
index 7c35253cbfa..c1f139207b8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -35,10 +37,10 @@ public class AdaptiveAvgPool1dImpl extends AdaptiveAvgPool1dImplBase {
   public AdaptiveAvgPool1dImpl(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size) { super((Pointer)null); allocate(output_size); }
-  @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size);
+  private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size);
   public AdaptiveAvgPool1dImpl(
       @Const @ByRef AdaptiveAvgPool1dOptions options_) { super((Pointer)null); allocate(options_); }
-  @NoDeallocator private native void allocate(
+  private native void allocate(
       @Const @ByRef AdaptiveAvgPool1dOptions options_);
 
   /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
   public AdaptiveAvgPool1dImpl(Pointer p) { super(p); }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java
index 1c92b663bd7..d34878d9424 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -26,10 +28,10 @@ public class AdaptiveAvgPool1dImplBase extends AdaptiveAvgPool1dImplCloneable {
   public AdaptiveAvgPool1dImplBase(Pointer p) { super(p); }
 
   public AdaptiveAvgPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size) { super((Pointer)null); allocate(output_size); }
-  @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size);
+  private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size);
   public AdaptiveAvgPool1dImplBase(
       @Const @ByRef AdaptiveAvgPool1dOptions options_) { super((Pointer)null); allocate(options_); }
-  @NoDeallocator private native void allocate(
+  private native void allocate(
       @Const @ByRef AdaptiveAvgPool1dOptions options_);
 
   public native void reset();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java
index ed2cf903466..a7a1d46cb80 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -20,18 +22,18 @@ public class AdaptiveAvgPool1dImplCloneable extends Module {
   static { Loader.load(); }
   /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
   public AdaptiveAvgPool1dImplCloneable(Pointer p) { super(p); }
+  @Override public Module asModule() { return asModule(this); }
+  @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool1dImplCloneable pointer);
 
   /** {@code reset()} must perform initialization of all members with reference
    * semantics, most importantly parameters, buffers and submodules. */
   public native void reset();
 
-  @Override public Module asModule() { return asModule(this); }
-  @Namespace public static native @Name("static_cast") Module asModule(AdaptiveAvgPool1dImplCloneable module);
 
   /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters
    * and submodules in the cloned module are different from those in the
    * original module. */
-  public native @SharedPtr Module clone(
-      @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
-  public native @SharedPtr Module clone();
+  public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
+      @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+  public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
 }
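[Editorial note: two changes recur in every generated Cloneable above: asModule() is now backed by a static_pointer_cast instead of a plain static_cast, preserving shared ownership, and clone() returns the copy by value as a shared torch::nn::Module. A sketch of both calls; the output-size constant is arbitrary.]

// Hedged sketch: upcasting and deep-copying through the regenerated Cloneable bindings.
AdaptiveAvgPool1dImpl pool = new AdaptiveAvgPool1dImpl(new LongPointer(1).put(0, 3));
Module base = pool.asModule();   // shared-ownership upcast to torch::nn::Module
Module copy = pool.clone();      // recursive deep copy: parameters and buffers are duplicated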
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native AdaptiveAvgPool1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java index 4c079ae9290..1b55ad48911 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2d.java deleted file mode 100644 index 044939bd03a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code AdaptiveAvgPool2dImpl}. - * See the documentation for {@code AdaptiveAvgPool2dImpl} class to learn what - * methods it provides, and examples of how to use {@code AdaptiveAvgPool2d} with - * {@code torch::nn::AdaptiveAvgPool2dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveAvgPool2d extends AdaptiveAvgPool2dImplModuleHolder { - static { Loader.load(); } - - public AdaptiveAvgPool2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AdaptiveAvgPool2d(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public AdaptiveAvgPool2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java index 26d88515c29..f85363d029d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,10 +37,10 @@ public class AdaptiveAvgPool2dImpl extends AdaptiveAvgPool2dImplBase { public AdaptiveAvgPool2dImpl(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); public AdaptiveAvgPool2dImpl( @Const @ByRef AdaptiveAvgPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + private native void allocate( @Const @ByRef AdaptiveAvgPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool2dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java index 3dbda931ac6..6b77c1d3a6d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,10 +24,10 @@ public class AdaptiveAvgPool2dImplBase extends AdaptiveAvgPool2dImplCloneable { public AdaptiveAvgPool2dImplBase(Pointer p) { super(p); } public AdaptiveAvgPool2dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); public AdaptiveAvgPool2dImplBase( @Const @ByRef AdaptiveAvgPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + private native void allocate( @Const @ByRef AdaptiveAvgPool2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java index e78f0a53d4c..a3156f2ee04 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class AdaptiveAvgPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(AdaptiveAvgPool2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplModuleHolder.java deleted file mode 100644 index 66f7d92ad14..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveAvgPool2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AdaptiveAvgPool2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error.
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public AdaptiveAvgPool2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public AdaptiveAvgPool2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") AdaptiveAvgPool2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") AdaptiveAvgPool2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native AdaptiveAvgPool2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
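For reference, the holder surface deleted here mapped the C++ operators onto named Java methods; against JavaCPP 1.5.9 this looked roughly like the sketch below (the impl construction and sizes are assumptions, the method names are the ones declared above):

    import org.bytedeco.pytorch.*;

    public class HolderSketch {
        public static void main(String[] args) {
            AdaptiveAvgPool2dImpl impl = new AdaptiveAvgPool2dImpl(new LongOptional(5));
            AdaptiveAvgPool2dImplModuleHolder holder =
                    new AdaptiveAvgPool2dImplModuleHolder(impl);
            if (!holder.is_empty()) {
                AdaptiveAvgPool2dImpl viaArrow = holder.access();   // C++ operator->
                AdaptiveAvgPool2dImpl viaStar  = holder.multiply(); // C++ operator*
                AdaptiveAvgPool2dImpl raw      = holder.get();      // raw pointer
            }
            // After this patch the holder layer is gone; callers hold the Impl
            // classes (or a Module reference) directly instead.
        }
    }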
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java index 2fd5d63c386..39163b910d9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3d.java deleted file mode 100644 index 9a78ca90644..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code AdaptiveAvgPool3dImpl}. - * See the documentation for {@code AdaptiveAvgPool3dImpl} class to learn what - * methods it provides, and examples of how to use {@code AdaptiveAvgPool3d} with - * {@code torch::nn::AdaptiveAvgPool3dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveAvgPool3d extends AdaptiveAvgPool3dImplModuleHolder { - static { Loader.load(); } - - public AdaptiveAvgPool3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AdaptiveAvgPool3d(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public AdaptiveAvgPool3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java index f5e43e91678..457192c9ff7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,10 +37,10 @@ public class AdaptiveAvgPool3dImpl extends AdaptiveAvgPool3dImplBase { public AdaptiveAvgPool3dImpl(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); public AdaptiveAvgPool3dImpl( @Const @ByRef AdaptiveAvgPool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + private native void allocate( @Const @ByRef AdaptiveAvgPool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool3dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java index 9bf21aaaf3d..7bf5b5c17f9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,10 +24,10 @@ public class AdaptiveAvgPool3dImplBase extends AdaptiveAvgPool3dImplCloneable { public AdaptiveAvgPool3dImplBase(Pointer p) { super(p); } public AdaptiveAvgPool3dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); public AdaptiveAvgPool3dImplBase( @Const @ByRef AdaptiveAvgPool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + private native void allocate( @Const @ByRef AdaptiveAvgPool3dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java index bfdcc7f4c8d..00879572dba 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class AdaptiveAvgPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveAvgPool3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveAvgPool3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(AdaptiveAvgPool3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplModuleHolder.java deleted file mode 100644 index f958b93ae22..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveAvgPool3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AdaptiveAvgPool3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error.
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public AdaptiveAvgPool3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public AdaptiveAvgPool3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") AdaptiveAvgPool3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") AdaptiveAvgPool3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native AdaptiveAvgPool3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java index dcc97be3719..64e0d4c16dd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLoss.java deleted file mode 100644 index 8db45cfd11d..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLoss.java +++ /dev/null @@ -1,35 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code AdaptiveLogSoftmaxWithLossImpl}. - * See the documentation for {@code AdaptiveLogSoftmaxWithLossImpl} class to learn - * what methods it provides, and examples of how to use - * {@code AdaptiveLogSoftmaxWithLoss} with - * {@code torch::nn::AdaptiveLogSoftmaxWithLossOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveLogSoftmaxWithLoss extends AdaptiveLogSoftmaxWithLossImplModuleHolder { - static { Loader.load(); } - - public AdaptiveLogSoftmaxWithLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AdaptiveLogSoftmaxWithLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public AdaptiveLogSoftmaxWithLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java index 6fb45770f72..68c7a75f4d4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,14 +46,14 @@ public AdaptiveLogSoftmaxWithLossImpl( @Cast("int64_t") long in_features, @Cast("int64_t") long n_classes, @ByVal @Cast("std::vector*") LongVector cutoffs) { super((Pointer)null); allocate(in_features, n_classes, cutoffs); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @Cast("int64_t") long in_features, @Cast("int64_t") long n_classes, @ByVal @Cast("std::vector*") LongVector cutoffs); public AdaptiveLogSoftmaxWithLossImpl( @ByVal AdaptiveLogSoftmaxWithLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @ByVal AdaptiveLogSoftmaxWithLossOptions options_); public native @ByVal ASMoutput forward(@Const @ByRef Tensor input, @Const @ByRef Tensor target); @@ -89,8 +91,4 @@ public AdaptiveLogSoftmaxWithLossImpl( /** Output size of head classifier */ public native @Cast("int64_t") long head_size(); public native AdaptiveLogSoftmaxWithLossImpl head_size(long setter); - - public native @ByRef Linear head(); public native AdaptiveLogSoftmaxWithLossImpl head(Linear setter); - - public native @ByRef ModuleList tail(); public native AdaptiveLogSoftmaxWithLossImpl tail(ModuleList setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java index 1441af8cfb0..6f3714a64ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class AdaptiveLogSoftmaxWithLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
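The forward(Tensor, Tensor) declaration above returns an ASMoutput; a minimal sketch of driving it, where the sizes, the LongVector varargs constructor, and the output()/loss() accessors are assumptions about the generated API rather than text from this patch:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ASMSketch {
        public static void main(String[] args) {
            // 64 input features, 100 classes; cutoffs must be strictly
            // increasing and less than n_classes.
            AdaptiveLogSoftmaxWithLossImpl asm =
                    new AdaptiveLogSoftmaxWithLossImpl(64, 100, new LongVector(10, 30));
            Tensor input  = randn(8, 64);  // batch of 8 feature rows
            Tensor target = randperm(8);   // 8 int64 class ids in [0, 8)
            ASMoutput out = asm.forward(input, target);
            Tensor logProb = out.output(); // assumed accessor for the output field
            Tensor loss    = out.loss();   // assumed accessor for the loss field
        }
    }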
*/ public AdaptiveLogSoftmaxWithLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveLogSoftmaxWithLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(AdaptiveLogSoftmaxWithLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplModuleHolder.java deleted file mode 100644 index bf8c0398d1e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveLogSoftmaxWithLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AdaptiveLogSoftmaxWithLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public AdaptiveLogSoftmaxWithLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}.
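Note the reworked asModule() at the top of this hunk: the upcast now goes through SHARED_PTR_NAMESPACE::static_pointer_cast instead of a raw static_cast, so the returned Module shares ownership with the wrapper it came from. A small sketch (construction details assumed as above):

    import org.bytedeco.pytorch.*;

    public class AsModuleSketch {
        public static void main(String[] args) {
            AdaptiveLogSoftmaxWithLossImplCloneable c =
                    new AdaptiveLogSoftmaxWithLossImpl(64, 100, new LongVector(10, 30));
            // Upcast view of the same shared_ptr: keeping m reachable keeps
            // the underlying impl alive as well.
            Module m = c.asModule();
        }
    }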
*/ - /* implicit */ public AdaptiveLogSoftmaxWithLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") AdaptiveLogSoftmaxWithLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") AdaptiveLogSoftmaxWithLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native AdaptiveLogSoftmaxWithLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java index a79a6c6e37b..cf503b18e01 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1d.java deleted file mode 100644 index 695f4ecbb35..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code AdaptiveMaxPool1dImpl}. 
- * See the documentation for {@code AdaptiveMaxPool1dImpl} class to learn what - * methods it provides, and examples of how to use {@code AdaptiveMaxPool1d} with - * {@code torch::nn::AdaptiveMaxPool1dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveMaxPool1d extends AdaptiveMaxPool1dImplModuleHolder { - static { Loader.load(); } - - public AdaptiveMaxPool1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AdaptiveMaxPool1d(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AdaptiveMaxPool1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java index 4fc822dd971..50fcd51fa50 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,10 +37,10 @@ public class AdaptiveMaxPool1dImpl extends AdaptiveMaxPool1dImplBase { public AdaptiveMaxPool1dImpl(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size) { super((Pointer)null); allocate(output_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); public AdaptiveMaxPool1dImpl( @Const @ByRef AdaptiveMaxPool1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + private native void allocate( @Const @ByRef AdaptiveMaxPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool1dImpl(Pointer p) { super(p); } @@ -48,5 +50,5 @@ public AdaptiveMaxPool1dImpl( /** Returns the indices along with the outputs. * Useful to pass to nn.MaxUnpool1d. 
*/ - public native @ByVal TensorTensorTuple forward_with_indices(@Const @ByRef Tensor input); + public native @ByVal T_TensorTensor_T forward_with_indices(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java index eea2f0249e4..d1ab371afa6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,10 +28,10 @@ public class AdaptiveMaxPool1dImplBase extends AdaptiveMaxPool1dImplCloneable { public AdaptiveMaxPool1dImplBase(Pointer p) { super(p); } public AdaptiveMaxPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size) { super((Pointer)null); allocate(output_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); public AdaptiveMaxPool1dImplBase( @Const @ByRef AdaptiveMaxPool1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + private native void allocate( @Const @ByRef AdaptiveMaxPool1dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java index aecbeef93ab..0c19a51ade2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class AdaptiveMaxPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. 
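The TensorTensorTuple to T_TensorTensor_T switch above is mechanical; consuming the pair might look like the sketch below, assuming the tuple wrapper exposes get0()/get1() accessors and using illustrative shapes:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class UnpoolSketch {
        public static void main(String[] args) {
            // ExpandingArray<1> output size, passed as a one-element LongPointer.
            AdaptiveMaxPool1dImpl pool =
                    new AdaptiveMaxPool1dImpl(new LongPointer(new long[]{4}));
            Tensor input = randn(1, 16, 10);          // (N, C, L)
            T_TensorTensor_T out = pool.forward_with_indices(input);
            Tensor pooled  = out.get0();              // max values
            Tensor indices = out.get1();              // argmax positions, e.g. for
                                                      // nn.MaxUnpool1d
        }
    }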
*/ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(AdaptiveMaxPool1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplModuleHolder.java deleted file mode 100644 index 2f5f3c112f9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveMaxPool1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AdaptiveMaxPool1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public AdaptiveMaxPool1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public AdaptiveMaxPool1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module.
*/ - public native @Name("operator ->") AdaptiveMaxPool1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") AdaptiveMaxPool1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native AdaptiveMaxPool1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java index 3b359dd5488..5e5d1434828 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2d.java deleted file mode 100644 index 55207efcc4b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code AdaptiveMaxPool2dImpl}. - * See the documentation for {@code AdaptiveMaxPool2dImpl} class to learn what - * methods it provides, and examples of how to use {@code AdaptiveMaxPool2d} with - * {@code torch::nn::AdaptiveMaxPool2dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveMaxPool2d extends AdaptiveMaxPool2dImplModuleHolder { - static { Loader.load(); } - - public AdaptiveMaxPool2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AdaptiveMaxPool2d(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AdaptiveMaxPool2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java index 5503a295d20..92b48723b63 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,10 +37,10 @@ public class AdaptiveMaxPool2dImpl extends AdaptiveMaxPool2dImplBase { public AdaptiveMaxPool2dImpl(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); public AdaptiveMaxPool2dImpl( @Const @ByRef AdaptiveMaxPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + private native void allocate( @Const @ByRef AdaptiveMaxPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool2dImpl(Pointer p) { super(p); } @@ -48,5 +50,5 @@ public AdaptiveMaxPool2dImpl( /** Returns the indices along with the outputs. * Useful to pass to nn.MaxUnpool2d. 
*/ - public native @ByVal TensorTensorTuple forward_with_indices(@Const @ByRef Tensor input); + public native @ByVal T_TensorTensor_T forward_with_indices(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java index 60af0facd75..6af69576bf0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,10 +24,10 @@ public class AdaptiveMaxPool2dImplBase extends AdaptiveMaxPool2dImplCloneable { public AdaptiveMaxPool2dImplBase(Pointer p) { super(p); } public AdaptiveMaxPool2dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); public AdaptiveMaxPool2dImplBase( @Const @ByRef AdaptiveMaxPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + private native void allocate( @Const @ByRef AdaptiveMaxPool2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java index 1d1ecac8b1a..4e028df9b2e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class AdaptiveMaxPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. 
*/ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(AdaptiveMaxPool2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplModuleHolder.java deleted file mode 100644 index 4ec2c74b1ba..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveMaxPool2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AdaptiveMaxPool2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public AdaptiveMaxPool2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public AdaptiveMaxPool2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module.
*/ - public native @Name("operator ->") AdaptiveMaxPool2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") AdaptiveMaxPool2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native AdaptiveMaxPool2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java index 19cd8114e4a..f36ce2f7bc3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3d.java deleted file mode 100644 index c0b7cd580e0..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code AdaptiveMaxPool3dImpl}. - * See the documentation for {@code AdaptiveMaxPool3dImpl} class to learn what - * methods it provides, and examples of how to use {@code AdaptiveMaxPool3d} with - * {@code torch::nn::AdaptiveMaxPool3dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveMaxPool3d extends AdaptiveMaxPool3dImplModuleHolder { - static { Loader.load(); } - - public AdaptiveMaxPool3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AdaptiveMaxPool3d(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AdaptiveMaxPool3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java index 0ed6c40cdc0..07416e8ac99 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,10 +37,10 @@ public class AdaptiveMaxPool3dImpl extends AdaptiveMaxPool3dImplBase { public AdaptiveMaxPool3dImpl(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); public AdaptiveMaxPool3dImpl( @Const @ByRef AdaptiveMaxPool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + private native void allocate( @Const @ByRef AdaptiveMaxPool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool3dImpl(Pointer p) { super(p); } @@ -48,5 +50,5 @@ public AdaptiveMaxPool3dImpl( /** Returns the indices along with the outputs. * Useful to pass to nn.MaxUnpool3d. 
*/ - public native @ByVal TensorTensorTuple forward_with_indices(@Const @ByRef Tensor input); + public native @ByVal T_TensorTensor_T forward_with_indices(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java index 2b7b592a62f..c62ea0ad6d3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,10 +24,10 @@ public class AdaptiveMaxPool3dImplBase extends AdaptiveMaxPool3dImplCloneable { public AdaptiveMaxPool3dImplBase(Pointer p) { super(p); } public AdaptiveMaxPool3dImplBase(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size) { super((Pointer)null); allocate(output_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); public AdaptiveMaxPool3dImplBase( @Const @ByRef AdaptiveMaxPool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + private native void allocate( @Const @ByRef AdaptiveMaxPool3dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java index ad8cc355606..35baff5c140 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class AdaptiveMaxPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AdaptiveMaxPool3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AdaptiveMaxPool3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. 
*/ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(AdaptiveMaxPool3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplModuleHolder.java deleted file mode 100644 index 21bd66fd8e5..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AdaptiveMaxPool3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AdaptiveMaxPool3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public AdaptiveMaxPool3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public AdaptiveMaxPool3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. 
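Note on the asModule()/clone() changes above: both now map through shared_ptr (a static_pointer_cast for asModule(), a @SharedPtr @ByVal return for clone()) instead of a raw static_cast, so the returned Module shares ownership with the wrapped module. Caller-side code is unchanged; a hedged sketch, again assuming LongOptional's value constructor:

    AdaptiveMaxPool3dImpl pool = new AdaptiveMaxPool3dImpl(new LongOptional(7));
    Module base = pool.asModule();   // shared_ptr-backed upcast to torch::nn::Module
    Module copy = pool.clone();      // recursive deep copy, still reference-counted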
*/ - public native @Name("operator ->") AdaptiveMaxPool3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") AdaptiveMaxPool3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native AdaptiveMaxPool3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java index 4be8a17b562..e1bc1217a28 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java index 35fa268b3b4..ca0ef0f9195 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java index ce2ac24195f..043e1db6c07 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java index dc277ecacde..757b85b1c40 100644 
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class AliasInfoOptional extends Pointer { public native @Name("operator =") @ByRef AliasInfoOptional put(@ByRef AliasInfoOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef AliasInfo get(); @ValueSetter public native AliasInfoOptional put(@ByRef AliasInfo value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java index e317e2e474d..29759e43df6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class AliasTypeSetOptional extends Pointer { public native @Name("operator =") @ByRef AliasTypeSetOptional put(@ByRef AliasTypeSetOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef TypeVector get(); @ValueSetter public native AliasTypeSetOptional put(@ByRef TypeVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java index cfeb72eb3ce..eb44f918baf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -50,7 +52,7 @@ public class Allocator extends Pointer { // is guaranteed to return a unique_ptr with this deleter attached; // it means the rawAllocate and rawDeallocate APIs are safe to use. // This function MUST always return the same BoundDeleter. 
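Note on the reset() methods newly mapped on AliasInfoOptional and AliasTypeSetOptional above: reset() clears the optional in place, mirroring c10::optional::reset(). A sketch with hypothetical values; the no-arg constructors are assumed to follow the usual pattern of the generated optional classes:

    AliasInfoOptional opt = new AliasInfoOptional();  // empty optional
    opt.put(new AliasInfo());                         // set via the @ValueSetter shown above
    boolean set = opt.has_value();                    // true
    opt.reset();                                      // back to empty; get() would now throw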
- public native @Cast("c10::DeleterFnPtr") Deleter raw_deleter(); + public native @Cast("c10::DeleterFnPtr") PointerConsumer raw_deleter(); public native Pointer raw_allocate(@Cast("size_t") long n); public native void raw_deallocate(Pointer ptr); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropout.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropout.java deleted file mode 100644 index aaa42daab23..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropout.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code AlphaDropoutImpl}. - * See the documentation for {@code AlphaDropoutImpl} class to learn what methods it - * provides, and examples of how to use {@code AlphaDropout} with - * {@code torch::nn::AlphaDropoutOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AlphaDropout extends AlphaDropoutImplModuleHolder { - static { Loader.load(); } - - public AlphaDropout(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AlphaDropout(@SharedPtr @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
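Note on the raw_deleter() change above: c10::DeleterFnPtr is now mapped to the PointerConsumer function-pointer class from org.bytedeco.pytorch.functions (newly imported at the top of each file) rather than the old Deleter type. A sketch built on the signatures shown in the Allocator hunk; invoking the deleter via call(Pointer) is an assumption about the generated functional interface:

    static void roundTrip(Allocator alloc) {
        Pointer block = alloc.raw_allocate(1024);        // 1 KiB of raw memory
        PointerConsumer deleter = alloc.raw_deleter();   // documented to be the same deleter each call
        alloc.raw_deallocate(block);                     // equivalently: deleter.call(block), per the assumption above
    }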
*/ - public AlphaDropout(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java index 7795c49fece..57f58bd0d69 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java index e21797fd079..5c3c61a790b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,12 +37,12 @@ public class AlphaDropoutImpl extends AlphaDropoutImplBase { public AlphaDropoutImpl(double p) { super((Pointer)null); allocate(p); } - @NoDeallocator private native void allocate(double p); + private native void allocate(double p); public AlphaDropoutImpl(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_); + private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_); public AlphaDropoutImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + private native void allocate(); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AlphaDropoutImpl(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
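Note on the holder removals above (AlphaDropout here, AdaptiveMaxPool3d earlier): with the ModuleHolder wrappers deleted, callers construct the Impl classes directly, and the dropped @NoDeallocator annotations re-enable JavaCPP's automatic deallocation for these objects. A migration sketch, assuming forward(Tensor) is mapped for the dropout modules as in earlier releases:

    AlphaDropoutImpl drop = new AlphaDropoutImpl(0.5);  // was: new AlphaDropout(...)
    Tensor y = drop.forward(randn(4, 8));               // alpha dropout in (default) train mode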
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java index ff52ab6a753..237ab9acb16 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,12 +25,12 @@ public class AlphaDropoutImplBase extends AlphaDropoutImplCloneable { public AlphaDropoutImplBase(Pointer p) { super(p); } public AlphaDropoutImplBase(double p) { super((Pointer)null); allocate(p); } - @NoDeallocator private native void allocate(double p); + private native void allocate(double p); public AlphaDropoutImplBase(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_); + private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_); public AlphaDropoutImplBase() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java index 7eb7fcd082a..d8c39e8b9a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class AlphaDropoutImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AlphaDropoutImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AlphaDropoutImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(AlphaDropoutImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplModuleHolder.java deleted file mode 100644 index 9c8c4a02a66..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AlphaDropoutImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AlphaDropoutImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public AlphaDropoutImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public AlphaDropoutImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") AlphaDropoutImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") AlphaDropoutImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native AlphaDropoutImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnnotatedSchema.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnnotatedSchema.java deleted file mode 100644 index 32bb9c0ce68..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnnotatedSchema.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// This data structure represents operator schema, with metadata specifying -// where the registration of this schema occurred -@Namespace("c10::impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AnnotatedSchema extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public AnnotatedSchema(Pointer p) { super(p); } - - public AnnotatedSchema(@ByVal FunctionSchema s, @StdString BytePointer d) { super((Pointer)null); allocate(s, d); } - private native void allocate(@ByVal FunctionSchema s, @StdString BytePointer d); - public AnnotatedSchema(@ByVal FunctionSchema s, @StdString String d) { super((Pointer)null); allocate(s, d); } - private native void allocate(@ByVal FunctionSchema s, @StdString String d); - public native @ByRef FunctionSchema schema(); public native AnnotatedSchema schema(FunctionSchema setter); - public native @StdString BytePointer debug(); public native AnnotatedSchema debug(BytePointer setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java index a27c56689a1..a9e1009f29f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java index d7fed015391..422ff08028e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java index 99d21e95288..b6b7c20ca80 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java index f86877de458..6cc7a8737fa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import 
org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java index 5df0ee236c4..a64f106db02 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java index 3415b8e5e9f..a8b98130858 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java index 4407962bc8e..28a2d3d8b58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java index 51ea6bf3068..2d479234267 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java index b90927a1229..2414c7ffca2 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -101,7 +103,7 @@ * std::shared_ptr ptr = module.ptr(); * torch::nn::Linear linear(module.get()); * \endrst */ -@Namespace("torch::nn") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class AnyModule extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -118,9 +120,255 @@ public class AnyModule extends Pointer { /** A default-constructed {@code AnyModule} is in an empty state. */ public AnyModule() { super((Pointer)null); allocate(); } - private native void allocate(); + @SharedPtr private native void allocate(); /** Constructs an {@code AnyModule} from a {@code shared_ptr} to concrete module object. */ + public AnyModule(AdaptiveLogSoftmaxWithLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl module); + public AnyModule(BatchNorm1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl module); + public AnyModule(InstanceNorm1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl module); + public AnyModule(Conv1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv1dImpl module); + public AnyModule(ConvTranspose1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl module); + public AnyModule(DropoutImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) DropoutImpl module); + public AnyModule(BatchNorm2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl module); + public AnyModule(InstanceNorm2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl module); + public AnyModule(Conv2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv2dImpl module); + public AnyModule(ConvTranspose2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl module); + public AnyModule(Dropout2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", 
"std::shared_ptr"}) Dropout2dImpl module); + public AnyModule(BatchNorm3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl module); + public AnyModule(InstanceNorm3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl module); + public AnyModule(Conv3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv3dImpl module); + public AnyModule(ConvTranspose3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl module); + public AnyModule(Dropout3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Dropout3dImpl module); + public AnyModule(AlphaDropoutImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl module); + public AnyModule(FeatureAlphaDropoutImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl module); + public AnyModule(CosineSimilarityImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl module); + public AnyModule(PairwiseDistanceImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl module); + public AnyModule(EmbeddingImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingImpl module); + public AnyModule(EmbeddingBagImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl module); + public AnyModule(FoldImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FoldImpl module); + public AnyModule(UnfoldImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UnfoldImpl module); + public AnyModule(IdentityImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) IdentityImpl module); + public AnyModule(LinearImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LinearImpl module); + public AnyModule(BilinearImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BilinearImpl module); + public AnyModule(FlattenImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FlattenImpl module); + public AnyModule(UnflattenImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UnflattenImpl module); + 
public AnyModule(L1LossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) L1LossImpl module); + public AnyModule(KLDivLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) KLDivLossImpl module); + public AnyModule(MSELossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MSELossImpl module); + public AnyModule(BCELossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BCELossImpl module); + public AnyModule(HingeEmbeddingLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl module); + public AnyModule(MultiMarginLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl module); + public AnyModule(CosineEmbeddingLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl module); + public AnyModule(SmoothL1LossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl module); + public AnyModule(HuberLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HuberLossImpl module); + public AnyModule(MultiLabelMarginLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl module); + public AnyModule(SoftMarginLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl module); + public AnyModule(MultiLabelSoftMarginLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl module); + public AnyModule(TripletMarginLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl module); + public AnyModule(TripletMarginWithDistanceLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl module); + public AnyModule(CTCLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CTCLossImpl module); + public AnyModule(PoissonNLLLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl module); + public AnyModule(MarginRankingLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl module); + public AnyModule(NLLLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native 
void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) NLLLossImpl module); + public AnyModule(CrossEntropyLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl module); + public AnyModule(BCEWithLogitsLossImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl module); + public AnyModule(ReflectionPad1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl module); + public AnyModule(ReplicationPad1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl module); + public AnyModule(ConstantPad1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module); + public AnyModule(AvgPool1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module); + public AnyModule(MaxPool1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool1dImpl module); + public AnyModule(AdaptiveAvgPool1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl module); + public AnyModule(AdaptiveMaxPool1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl module); + public AnyModule(MaxUnpool1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl module); + public AnyModule(LPPool1dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool1dImpl module); + public AnyModule(ReflectionPad2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl module); + public AnyModule(ReplicationPad2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl module); + public AnyModule(ConstantPad2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl module); + public AnyModule(ZeroPad2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl module); + public AnyModule(AvgPool2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool2dImpl module); + public AnyModule(MaxPool2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool2dImpl module); + public AnyModule(AdaptiveAvgPool2dImpl module) { super((Pointer)null); 
allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl module); + public AnyModule(AdaptiveMaxPool2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl module); + public AnyModule(MaxUnpool2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl module); + public AnyModule(FractionalMaxPool2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl module); + public AnyModule(LPPool2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool2dImpl module); + public AnyModule(ReflectionPad3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl module); + public AnyModule(ReplicationPad3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module); + public AnyModule(ConstantPad3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module); + public AnyModule(AvgPool3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl module); + public AnyModule(MaxPool3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool3dImpl module); + public AnyModule(AdaptiveAvgPool3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl module); + public AnyModule(AdaptiveMaxPool3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl module); + public AnyModule(MaxUnpool3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl module); + public AnyModule(FractionalMaxPool3dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl module); + public AnyModule(RNNImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNImpl module); + public AnyModule(LSTMImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMImpl module); + public AnyModule(GRUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUImpl module); + public AnyModule(RNNCellImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNCellImpl module); + public AnyModule(LSTMCellImpl module) { super((Pointer)null); 
allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMCellImpl module); + public AnyModule(GRUCellImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUCellImpl module); + public AnyModule(PixelShuffleImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelShuffleImpl module); + public AnyModule(PixelUnshuffleImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl module); + public AnyModule(UpsampleImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UpsampleImpl module); + public AnyModule(ELUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ELUImpl module); + public AnyModule(SELUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SELUImpl module); + public AnyModule(HardshrinkImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HardshrinkImpl module); + public AnyModule(HardtanhImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HardtanhImpl module); + public AnyModule(LeakyReLUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LeakyReLUImpl module); + public AnyModule(LogSigmoidImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSigmoidImpl module); + public AnyModule(SoftmaxImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftmaxImpl module); + public AnyModule(SoftminImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftminImpl module); + public AnyModule(LogSoftmaxImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl module); + public AnyModule(Softmax2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Softmax2dImpl module); + public AnyModule(PReLUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PReLUImpl module); + public AnyModule(ReLUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLUImpl module); + public AnyModule(ReLU6Impl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLU6Impl module); + public AnyModule(RReLUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RReLUImpl module); + public 
AnyModule(CELUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CELUImpl module); + public AnyModule(GLUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GLUImpl module); + public AnyModule(GELUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GELUImpl module); + public AnyModule(SiLUImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SiLUImpl module); + public AnyModule(MishImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MishImpl module); + public AnyModule(SigmoidImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SigmoidImpl module); + public AnyModule(SoftplusImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftplusImpl module); + public AnyModule(SoftshrinkImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftshrinkImpl module); + public AnyModule(SoftsignImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftsignImpl module); + public AnyModule(TanhImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhImpl module); + public AnyModule(TanhshrinkImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhshrinkImpl module); + public AnyModule(ThresholdImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ThresholdImpl module); + public AnyModule(MultiheadAttentionImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl module); + public AnyModule(LayerNormImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LayerNormImpl module); + public AnyModule(LocalResponseNormImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl module); + public AnyModule(CrossMapLRN2dImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl module); + public AnyModule(GroupNormImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GroupNormImpl module); + public AnyModule(TransformerEncoderLayerImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl module); + public AnyModule(TransformerDecoderLayerImpl module) { super((Pointer)null); allocate(module); } + 
@SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl module); + public AnyModule(TransformerEncoderImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl module); + public AnyModule(TransformerDecoderImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl module); + public AnyModule(TransformerImpl module) { super((Pointer)null); allocate(module); } + @SharedPtr private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerImpl module); /** Constructs an {@code AnyModule} from a concrete module object. */ @@ -129,14 +377,14 @@ public class AnyModule extends Pointer { /** Move construction and assignment is allowed, and follows the default * behavior of move for {@code std::unique_ptr}. */ public AnyModule(@ByRef(true) AnyModule arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByRef(true) AnyModule arg0); + @SharedPtr private native void allocate(@ByRef(true) AnyModule arg0); public native @ByRef @Name("operator =") AnyModule put(@ByRef(true) AnyModule arg0); /** Creates a shallow copy of an {@code AnyModule}. */ /** Creates a deep copy of an {@code AnyModule} if it contains a module, else an * empty {@code AnyModule} if it is empty. */ - public native @ByVal AnyModule clone(@ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @ByVal AnyModule clone(@ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); public native @ByVal AnyModule clone(); /** Assigns a module to the {@code AnyModule} (to circumvent the explicit @@ -145,10 +393,39 @@ public class AnyModule extends Pointer { /** Invokes {@code forward()} on the contained module with the given arguments, and * returns the return value as an {@code AnyValue}. Use this method when chaining * {@code AnyModule}s in a loop. */ + public native @ByVal AnyValue any_forward(@Const @ByRef AnyValue input); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6, @Const @ByRef Tensor input7, @Const @ByRef Tensor input8); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @ByRef(nullValue = "c10::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Const @ByRef(nullValue = "c10::optional >(c10::nullopt)") LongVectorOptional output_size); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::optional >{}") T_TensorTensor_TOptional hx_opt); + public native @ByVal AnyValue any_forward(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "torch::Tensor{}") Tensor key_padding_mask, @Cast("bool") boolean need_weights/*=true*/, @Const @ByRef(nullValue = "torch::Tensor{}") Tensor attn_mask, @Cast("bool") boolean average_attn_weights/*=true*/); /** Invokes {@code forward()} on the contained module with the given arguments, and * casts the returned {@code AnyValue} to the supplied {@code ReturnType} (which defaults * to {@code torch::Tensor}). */ + public native @ByVal Tensor forward(@Const @ByRef Tensor input); + public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2); + public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3); + public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4); + public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6); + public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6, @Const @ByRef Tensor input7, @Const @ByRef Tensor input8); + public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByRef(nullValue = "c10::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); + public native @ByVal Tensor forward(@Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size); + public native @ByVal Tensor forward(@Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Const @ByRef(nullValue = "c10::optional >(c10::nullopt)") LongVectorOptional output_size); + public native @ByVal @Name("forward>>") T_TensorT_TensorTensor_T_T forwardT_TensorT_TensorTensor_T_T(@Const @ByRef Tensor input); + public native @ByVal @Name("forward>>") T_TensorT_TensorTensor_T_T forwardT_TensorT_TensorTensor_T_T(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::optional >{}") T_TensorTensor_TOptional hx_opt); + public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input); + public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2); + public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3); + public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::optional >{}") T_TensorTensor_TOptional hx_opt); + public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "torch::Tensor{}") Tensor key_padding_mask, @Cast("bool") boolean need_weights/*=true*/, @Const @ByRef(nullValue = "torch::Tensor{}") Tensor attn_mask, @Cast("bool") boolean average_attn_weights/*=true*/); + public native @ByVal @Name("forward") ASMoutput forwardASMoutput(@Const @ByRef Tensor input, @Const @ByRef Tensor target); /** Attempts to cast the underlying module to the given module type. Throws an * exception if the types do not match. */ @@ -161,7 +438,7 @@ public class AnyModule extends Pointer { /** Returns a {@code std::shared_ptr} whose dynamic type is that of the underlying * module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module ptr(); + public native @SharedPtr("torch::nn::Module") @ByVal Module ptr(); /** Like {@code ptr()}, but casts the pointer to the given type. 
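 *
 * Taken together, the constructors and {@code forward} overloads above support
 * a usage sketch like the following (illustrative only, not part of the
 * generated file; it assumes the org.bytedeco.pytorch presets are on the
 * classpath and that {@code ReLUImpl} exposes its default constructor):
 *
 * <pre>{@code
 * import static org.bytedeco.pytorch.global.torch.*;
 *
 * AnyModule any = new AnyModule(new ReLUImpl()); // any constructor above works
 * Tensor in  = randn(2, 3);
 * Tensor out = any.forward(in);                  // result cast to Tensor
 * AnyValue v = any.any_forward(in);              // type-erased result
 * AnyModule copy = any.clone();                  // deep copy of the module
 * }</pre>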
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java index 0b87223d761..480c4e0814a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class AnyModuleVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public AnyModule front() { return get(0); } + public AnyModule back() { return get(size() - 1); } @Index(function = "at") public native @ByRef AnyModule get(@Cast("size_t") long i); public native AnyModuleVector put(@Cast("size_t") long i, AnyModule value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java index 2a8f96c283d..5dc8f8f8b97 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java index 4ef3c8eef36..73a1a98cda4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java index 847e5a7453d..16ede3fd9f4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java index beeb68d67ea..b2a5d0de485 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java new file mode 100644 index 00000000000..41787d2aa89 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java @@ -0,0 +1,61 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyValue ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +/** An implementation of {@code std::any} which stores + * a type erased object, whose concrete value can be retrieved at runtime by + * checking if the {@code typeid()} of a requested type matches the {@code typeid()} of + * the object stored. */ +@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AnyValue extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AnyValue(Pointer p) { super(p); } + + /** Move construction and assignment is allowed, and follows the default + * behavior of move for {@code std::unique_ptr}. */ + public AnyValue(@ByRef(true) AnyValue arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@ByRef(true) AnyValue arg0); + public native @ByRef @Name("operator =") AnyValue put(@ByRef(true) AnyValue arg0); + + /** Copy construction and assignment is allowed. */ + + /** Constructs the {@code AnyValue} from value type. */ + + /** Returns a pointer to the value contained in the {@code AnyValue} if the type + * passed as template parameter matches the type of the value stored, and + * returns a null pointer otherwise. */ + public native @Name("try_get") Tensor try_getTensor(); + public native @Name("try_get") ASMoutput try_getASMoutput(); + public native @Name("try_get >") T_TensorTensor_T try_getT_TensorTensor_T(); + public native @Name("try_get > >") T_TensorT_TensorTensor_T_T try_getT_TensorT_TensorTensor_T_T(); + + /** Returns the value contained in the {@code AnyValue} if the type passed as + * template parameter matches the type of the value stored, and throws an + * exception otherwise. 
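+ *
+ * For example (an illustrative sketch, not part of the generated file;
+ * {@code v} is assumed to be an {@code AnyValue} returned by
+ * {@code AnyModule.any_forward}):
+ *
+ * <pre>{@code
+ * Tensor t = v.try_getTensor(); // null pointer if the stored type is not Tensor
+ * Tensor u = v.getTensor();     // throws if the stored type is not Tensor
+ * }</pre>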
*/ + public native @ByVal @Name("get") Tensor getTensor(); + public native @ByVal @Name("get") ASMoutput getASMoutput(); + public native @ByVal @Name("get >") T_TensorTensor_T getT_TensorTensor_T(); + public native @ByVal @Name("get > >") T_TensorT_TensorTensor_T_T getT_TensorT_TensorTensor_T_T(); + + /** Returns the {@code type_info} object of the contained value. */ + public native @Cast("const std::type_info*") @ByRef @NoException(true) Pointer type_info(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java index 632d9f84619..270fe31f925 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,8 +21,17 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Apply extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Apply(Pointer p) { super(p); } - public Apply(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Apply(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr callee(); + public native @ByVal ExprList inputs(); + public native @ByVal AttributeList attributes(); + public static native @ByVal Apply create( + @Const @ByRef SourceRange range, + @Const @ByRef Expr callee, + @Const @ByRef ExprList inputs, + @Const @ByRef AttributeList attributes); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java index dda3ed4659a..a1ef82069e0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java new file mode 100644 index 00000000000..6076d0a5eb6 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java @@ -0,0 +1,144 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; 
+import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +/** ArrayRef - Represent a constant reference to an array (0 or more elements + * consecutively in memory), i.e. a start pointer and a length. It allows + * various APIs to take consecutive elements easily and conveniently. + * + * This class does not own the underlying data, it is expected to be used in + * situations where the data resides in some other buffer, whose lifetime + * extends past that of the ArrayRef. For this reason, it is not in general + * safe to store an ArrayRef. + * + * This is intended to be trivially copyable, so it should be passed by + * value. */ +@Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ArgumentArrayRef extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ArgumentArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ArgumentArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public ArgumentArrayRef position(long position) { + return (ArgumentArrayRef)super.position(position); + } + @Override public ArgumentArrayRef getPointer(long i) { + return new ArgumentArrayRef((Pointer)this).offsetAddress(i); + } + + /** \name Constructors + * \{ +
+ * Construct an empty ArrayRef. */ + /* implicit */ public ArgumentArrayRef() { super((Pointer)null); allocate(); } +private native void allocate(); + + /** Construct an ArrayRef from a single element. */ + // TODO Make this explicit + + + /** Construct an ArrayRef from a pointer and length. */ + public ArgumentArrayRef(@Const Argument data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Const Argument data, @Cast("size_t") long length); + + /** Construct an ArrayRef from a range. */ + public ArgumentArrayRef(@Const Argument begin, @Const Argument end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Const Argument begin, @Const Argument end); + + /** Construct an ArrayRef from a SmallVector. This is templated in order to + * avoid instantiating SmallVectorTemplateCommon whenever we + * copy-construct an ArrayRef. */ + + /** Construct an ArrayRef from a std::vector. */ + // The enable_if stuff here makes sure that this isn't used for + // std::vector, because ArrayRef can't work on a std::vector + // bitfield. + + /** Construct an ArrayRef from a std::array */ + + /** Construct an ArrayRef from a C array. */ + + /** Construct an ArrayRef from a std::initializer_list. */ + /* implicit */ + + /** \} + * \name Simple Operations + * \{ */ + + public native @Const @ByPtr Argument begin(); + public native @Const @ByPtr Argument end(); + + // These are actually the same as iterator, since ArrayRef only + // gives you const iterators. + public native @Const @ByPtr Argument cbegin(); + public native @Const @ByPtr Argument cend(); + + /** empty - Check if the array is empty. */ + public native @Cast("const bool") boolean empty(); + + public native @Const Argument data(); + + /** size - Get the array size. */ + public native @Cast("const size_t") long size(); + + /** front - Get the first element. */ + public native @Const @ByRef Argument front(); + + /** back - Get the last element. */ + public native @Const @ByRef Argument back(); + + /** equals - Check for element-wise equality. */ + public native @Cast("const bool") boolean equals(@ByVal ArgumentArrayRef RHS); + + /** slice(n, m) - Take M elements of the array starting at element N */ + public native @Const @ByVal ArgumentArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + + /** slice(n) - Chop off the first N elements of the array. */ + public native @Const @ByVal ArgumentArrayRef slice(@Cast("size_t") long N); + + /** \} + * \name Operator Overloads + * \{ */ + public native @Const @ByRef @Name("operator []") Argument get(@Cast("size_t") long Index); + + /** Vector compatibility */ + + /// + public native @Const @ByRef Argument at(@Cast("size_t") long Index); + + /** Disallow accidental assignment from a temporary. + * + * The declaration here is extra complicated so that "arrayRef = {}" + * continues to select the move assignment operator. */ + + + /** Disallow accidental assignment from a temporary. + * + * The declaration here is extra complicated so that "arrayRef = {}" + * continues to select the move assignment operator. 
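+ *
+ * Putting the operations above together (an illustrative sketch, not part of
+ * the generated file; {@code data} is assumed to point at three consecutive
+ * {@code Argument} objects owned elsewhere):
+ *
+ * <pre>{@code
+ * ArgumentArrayRef ref = new ArgumentArrayRef(data, 3); // non-owning view
+ * for (long i = 0; i < ref.size(); i++) {
+ *     Argument a = ref.get(i);                          // operator[]
+ * }
+ * ArgumentArrayRef tail = ref.slice(1);                 // drop the first element
+ * }</pre>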
*/ + + + /** \} + * \name Expensive Operations + * \{ */ + public native @StdVector Argument vec(); + + /** \} */ +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java new file mode 100644 index 00000000000..f5e011b39e6 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java @@ -0,0 +1,55 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** The templated inference code creates {@code ArgumentDef} instead of {@code Argument}, + * because that can be constructed at compile time and has a much smaller + * binary size than having calls to {@code Argument} constructors in the template. + * Creating {@code Argument} objects from {@code ArgumentDef} can then be done at + * runtime in a non-templated way. */ +@Namespace("c10::detail::infer_schema") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ArgumentDef extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ArgumentDef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ArgumentDef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public ArgumentDef position(long position) { + return (ArgumentDef)super.position(position); + } + @Override public ArgumentDef getPointer(long i) { + return new ArgumentDef((Pointer)this).offsetAddress(i); + } + + public static class GetTypeFn extends FunctionPointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public GetTypeFn(Pointer p) { super(p); } + protected GetTypeFn() { allocate(); } + private native void allocate(); + public native @ByVal Type.TypePtr call(); + } + public native GetTypeFn getTypeFn(); public native ArgumentDef getTypeFn(GetTypeFn setter); + public native GetTypeFn getFakeTypeFn(); public native ArgumentDef getFakeTypeFn(GetTypeFn setter); + public ArgumentDef() { super((Pointer)null); allocate(); } + private native void allocate(); + public ArgumentDef(GetTypeFn getTypeFn, GetTypeFn getFakeTypeFn) { super((Pointer)null); allocate(getTypeFn, getFakeTypeFn); } + private native void allocate(GetTypeFn getTypeFn, GetTypeFn getFakeTypeFn); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java new file mode 100644 index 00000000000..e5ac714f993 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java @@ -0,0 +1,133 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ArgumentDefArrayRef extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ArgumentDefArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ArgumentDefArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public ArgumentDefArrayRef position(long position) { + return (ArgumentDefArrayRef)super.position(position); + } + @Override public ArgumentDefArrayRef getPointer(long i) { + return new ArgumentDefArrayRef((Pointer)this).offsetAddress(i); + } + + /** \name Constructors + * \{ +
+ * Construct an empty ArrayRef. */ + /* implicit */ public ArgumentDefArrayRef() { super((Pointer)null); allocate(); } +private native void allocate(); + + /** Construct an ArrayRef from a single element. */ + // TODO Make this explicit + + + /** Construct an ArrayRef from a pointer and length. */ + public ArgumentDefArrayRef(@Const ArgumentDef data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Const ArgumentDef data, @Cast("size_t") long length); + + /** Construct an ArrayRef from a range. */ + public ArgumentDefArrayRef(@Const ArgumentDef begin, @Const ArgumentDef end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Const ArgumentDef begin, @Const ArgumentDef end); + + /** Construct an ArrayRef from a SmallVector. This is templated in order to + * avoid instantiating SmallVectorTemplateCommon whenever we + * copy-construct an ArrayRef. */ + + /** Construct an ArrayRef from a std::vector. */ + // The enable_if stuff here makes sure that this isn't used for + // std::vector, because ArrayRef can't work on a std::vector + // bitfield. + + /** Construct an ArrayRef from a std::array */ + + /** Construct an ArrayRef from a C array. */ + + /** Construct an ArrayRef from a std::initializer_list. */ + /* implicit */ + + /** \} + * \name Simple Operations + * \{ */ + + public native @Const @ByPtr ArgumentDef begin(); + public native @Const @ByPtr ArgumentDef end(); + + // These are actually the same as iterator, since ArrayRef only + // gives you const iterators. + public native @Const @ByPtr ArgumentDef cbegin(); + public native @Const @ByPtr ArgumentDef cend(); + + /** empty - Check if the array is empty. */ + public native @Cast("const bool") boolean empty(); + + public native @Const ArgumentDef data(); + + /** size - Get the array size. */ + public native @Cast("const size_t") long size(); + + /** front - Get the first element. */ + public native @Const @ByRef ArgumentDef front(); + + /** back - Get the last element. */ + public native @Const @ByRef ArgumentDef back(); + + /** equals - Check for element-wise equality. */ + + + /** slice(n, m) - Take M elements of the array starting at element N */ + public native @Const @ByVal ArgumentDefArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + + /** slice(n) - Chop off the first N elements of the array. */ + public native @Const @ByVal ArgumentDefArrayRef slice(@Cast("size_t") long N); + + /** \} + * \name Operator Overloads + * \{ */ + public native @Const @ByRef @Name("operator []") ArgumentDef get(@Cast("size_t") long Index); + + /** Vector compatibility */ + + /// + public native @Const @ByRef ArgumentDef at(@Cast("size_t") long Index); + + /** Disallow accidental assignment from a temporary. + * + * The declaration here is extra complicated so that "arrayRef = {}" + * continues to select the move assignment operator. */ + + + /** Disallow accidental assignment from a temporary. + * + * The declaration here is extra complicated so that "arrayRef = {}" + * continues to select the move assignment operator. 
*/ + + + /** \} + * \name Expensive Operations + * \{ */ + public native @StdVector ArgumentDef vec(); + + /** \} */ +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java index 3d9ba0d559d..a859fdf0e1e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java index b7b60c0aee9..43f0a6329d9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java index 1fa88af2d85..43c32f338a4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java index 084b5f1e01f..9dd21f2748c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentVector.java deleted file mode 100644 index 7c7d3499367..00000000000 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentVector.java +++ /dev/null @@ -1,86 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ArgumentVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ArgumentVector(Pointer p) { super(p); } - public ArgumentVector(Argument value) { this(1); put(0, value); } - public ArgumentVector(Argument ... array) { this(array.length); put(array); } - public ArgumentVector() { allocate(); } - public ArgumentVector(long n) { allocate(n); } - private native void allocate(); - private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef ArgumentVector put(@ByRef ArgumentVector x); - - public boolean empty() { return size() == 0; } - public native long size(); - public void clear() { resize(0); } - public native void resize(@Cast("size_t") long n); - - @Index(function = "at") public native @ByRef Argument get(@Cast("size_t") long i); - public native ArgumentVector put(@Cast("size_t") long i, Argument value); - - public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef Argument value); - public native @ByVal Iterator erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @ByRef @Const Argument get(); - } - - public Argument[] get() { - Argument[] array = new Argument[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE]; - for (int i = 0; i < array.length; i++) { - array[i] = get(i); - } - return array; - } - @Override public String toString() { - return java.util.Arrays.toString(get()); - } - - public Argument pop_back() { - long size = size(); - Argument value = get(size - 1); - resize(size - 1); - return value; - } - public ArgumentVector push_back(Argument value) { - long size = size(); - resize(size + 1); - return put(size, value); - } - public ArgumentVector put(Argument value) { - if (size() != 1) { resize(1); } - return put(0, value); - } - public ArgumentVector put(Argument ... 
array) { - if (size() != array.length) { resize(array.length); } - for (int i = 0; i < array.length; i++) { - put(i, array[i]); - } - return this; - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java index 7f95d17ecff..53c0baf7725 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Assert extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Assert(Pointer p) { super(p); } - public Assert(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Assert(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr test(); public native @ByVal ExprMaybe msg(); public static native @ByVal Assert create( diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java index 5de07402253..0463f8adb10 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,18 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Assign extends Stmt { static { Loader.load(); } - - public Assign(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
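+ *
+ * The {@code create} factory declared below can be used as in this sketch
+ * (illustrative only; {@code range}, {@code lhs}, {@code rhs}, and
+ * {@code type} are assumed to come from surrounding JIT frontend code):
+ *
+ * <pre>{@code
+ * Assign a = Assign.create(range, lhs, rhs, type);
+ * ExprList targets = a.lhs_list();
+ * }</pre>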
*/ + public Assign(Pointer p) { super(p); } + + public Assign(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public static native @ByVal Assign create( + @Const @ByRef SourceRange range, + @Const @ByRef ExprList lhs, + @Const @ByRef ExprMaybe rhs, + @Const @ByRef ExprMaybe type); + + public native @ByVal ExprList lhs_list(); public native @ByVal Expr lhs(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java new file mode 100644 index 00000000000..4a51e6c7179 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::List") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AssignList extends TreeView { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AssignList(Pointer p) { super(p); } + + + public AssignList(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal @Cast("torch::jit::List::iterator*") AssignListIterator begin(); + public native @ByVal @Cast("torch::jit::List::iterator*") AssignListIterator end(); + public native @Cast("bool") boolean empty(); + public native @ByVal @Name("operator []") Assign get(@Cast("size_t") long i); + + public static native @ByVal AssignList create(@Const @ByRef SourceRange range, @StdVector Assign subtrees); + public static native @ByVal AssignList unsafeCreate(@Const @ByRef SourceRange range, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector subtrees); + public native @Cast("size_t") long size(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java new file mode 100644 index 00000000000..af9959f0585 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::ListIterator") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AssignListIterator extends Pointer { + static { Loader.load(); } + 
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AssignListIterator(Pointer p) { super(p); } + + public AssignListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it) { super((Pointer)null); allocate(it); } + private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef AssignListIterator rhs); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef AssignListIterator rhs); + public native @ByVal @Name("operator *") Assign multiply(); + public native @ByRef @Name("operator +=") AssignListIterator addPut(@Cast("std::ptrdiff_t") long n); + public native @ByRef @Name("operator ++") AssignListIterator increment(); + public native @ByRef @Name("operator --") AssignListIterator decrement(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java new file mode 100644 index 00000000000..c2ba083f1b6 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::Maybe >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AssignListMaybe extends TreeView { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
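+ *
+ * For the {@code AssignList}/{@code AssignListIterator} pair declared above,
+ * a plain index loop is the simplest traversal (illustrative sketch only;
+ * {@code assigns} is an {@code AssignList} assumed to come from parsed
+ * source):
+ *
+ * <pre>{@code
+ * for (long i = 0; i < assigns.size(); i++) {
+ *     Assign a = assigns.get(i); // operator[]
+ * }
+ * }</pre>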
*/ + public AssignListMaybe(Pointer p) { super(p); } + + public AssignListMaybe(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + /* implicit */ public AssignListMaybe(@Const @ByRef AssignList tree) { super((Pointer)null); allocate(tree); } +private native void allocate(@Const @ByRef AssignList tree); + public native @Cast("bool") boolean present(); + public native @ByVal AssignList get(); + + public static native @ByVal AssignListMaybe create(@Const @ByRef SourceRange range); + public static native @ByVal AssignListMaybe create(@Const @ByRef SourceRange range, @Const @ByRef AssignList value); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java index 15e5e24920a..7d42ebfda0c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,13 +25,15 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Attribute extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Attribute(Pointer p) { super(p); } - public Attribute(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Attribute(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Ident name(); public native @ByVal Expr value(); public static native @ByVal Attribute create( @Const @ByRef SourceRange range, @Const @ByRef Ident name, - @Cast("const torch::jit::TreeRef*") @ByRef Pointer value); + @Const @ByRef TreeRef value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java new file mode 100644 index 00000000000..83d1550ebd7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::List") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AttributeList extends TreeView { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public AttributeList(Pointer p) { super(p); } + + + public AttributeList(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal @Cast("torch::jit::List::iterator*") AttributeListIterator begin(); + public native @ByVal @Cast("torch::jit::List::iterator*") AttributeListIterator end(); + public native @Cast("bool") boolean empty(); + public native @ByVal @Name("operator []") Attribute get(@Cast("size_t") long i); + + public static native @ByVal AttributeList create(@Const @ByRef SourceRange range, @StdVector Attribute subtrees); + public static native @ByVal AttributeList unsafeCreate(@Const @ByRef SourceRange range, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector subtrees); + public native @Cast("size_t") long size(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java new file mode 100644 index 00000000000..ef240d6c10e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::ListIterator") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AttributeListIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public AttributeListIterator(Pointer p) { super(p); } + + public AttributeListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it) { super((Pointer)null); allocate(it); } + private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef AttributeListIterator rhs); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef AttributeListIterator rhs); + public native @ByVal @Name("operator *") Attribute multiply(); + public native @ByRef @Name("operator +=") AttributeListIterator addPut(@Cast("std::ptrdiff_t") long n); + public native @ByRef @Name("operator ++") AttributeListIterator increment(); + public native @ByRef @Name("operator --") AttributeListIterator decrement(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java index 472f08d5763..c1636bb5626 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,7 +39,7 @@ public class AttributePolicy extends Pointer { public static native @ByVal @Cast("torch::jit::detail::AttributePolicy::value_type*") IValue create( @StdVector SlotCursor cursors, @ByVal IValue v); - public static native @Cast("bool") boolean valid(@Const @SharedPtr @ByRef ClassType typ, @Cast("size_t") long i, @Const @ByRef IValue v); + public static native @Cast("bool") boolean valid(@Const @SharedPtr("c10::ClassType") @ByRef ClassType typ, @Cast("size_t") long i, @Const @ByRef IValue v); @MemberGetter public static native @Cast("const bool") boolean all_slots(); public static final boolean all_slots = all_slots(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java index cd67d225345..7995f21b89e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java index 4c31248a127..87ea6edbe67 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import 
org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,9 +22,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class AugAssign extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AugAssign(Pointer p) { super(p); } - public AugAssign(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public AugAssign(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public static native @ByVal AugAssign create( @Const @ByRef SourceRange range, @Const @ByRef Expr lhs, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java index 18d717df979..d91e008e33f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,7 +21,9 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class AugAssignKind extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public AugAssignKind(Pointer p) { super(p); } - public AugAssignKind(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public AugAssignKind(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java new file mode 100644 index 00000000000..4b715238193 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/* Note [AutoDispatchBelowADInplaceOrView] + * AutoDispatchBelowADInplaceOrView is equivalent to AutoNonVariableTypeMode + * before we split inplace & view ops out of the VariableType kernel. + * Note this guard is used in VariableType kernels for functional ops + * as well as in ADInplaceOrView kernels for inplace/view ops to enforce the + * Invariant: + * Once you are in a VariableType/ADInplaceOrView kernel for an op, + * you never go back to a kernel on the same dispatch key until + * you finish the current op. + */ +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AutoDispatchBelowADInplaceOrView extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AutoDispatchBelowADInplaceOrView(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}.
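+ * <p>A minimal RAII sketch, assuming a tensor {@code t} exists and relying on JavaCPP's {@code Pointer} implementing {@code AutoCloseable}: + * <pre>{@code + * try (AutoDispatchBelowADInplaceOrView guard = new AutoDispatchBelowADInplaceOrView()) { + *     t.add_(new Scalar(1)); // runs with the Autograd & ADInplaceOrView keys disabled + * } // close() deallocates the guard, restoring the previous dispatch state + * }</pre>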
*/ + public AutoDispatchBelowADInplaceOrView(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public AutoDispatchBelowADInplaceOrView position(long position) { + return (AutoDispatchBelowADInplaceOrView)super.position(position); + } + @Override public AutoDispatchBelowADInplaceOrView getPointer(long i) { + return new AutoDispatchBelowADInplaceOrView((Pointer)this).offsetAddress(i); + } + + public AutoDispatchBelowADInplaceOrView() { super((Pointer)null); allocate(); } + private native void allocate(); + // disable Autograd & ADInplaceOrView dispatch keys +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java new file mode 100644 index 00000000000..b6684cba923 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java @@ -0,0 +1,86 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// A RAII, thread-local (!) guard that will disable dispatch to the variable +// handler. +// +// NOTE [ Treating Variables as non-Variables in type dispatch ] +// +// What exactly does AutoDispatchBelowAutograd do? The short answer is, it causes +// dispatches on ATen functions to go to the non-variable implementation, +// bypassing autograd handling (and also profiling and tracing). +// +// To understand why this guard exists, it's helpful to understand the history +// behind how Variable was implemented. Previously, Variables were implemented +// as a wrapper on Tensors; so the act of processing a Variable involved +// unwrapping the underlying Tensor, and then calling the underlying base +// operation on /that/ unwrapped Tensor. +// +// However, after the Variable/Tensor merge, there is no concept of unwrapping +// a tensor anymore. If you just call the operation on the same variable +// again inside your VariableType handler, you'll dispatch back to +// VariableType, which is not what we want. +// +// The solution to the above problem is to add `at::AutoDispatchBelowAutograd`, which +// when enabled will cause `legacyTensorType()` and `getType()` to always return +// non-Variable type, even if the tensor being called on is a variable. + +/* Note [AutoDispatchBelowAutograd] + * AutoDispatchBelowAutograd is **INTERNAL ONLY**; it should only be used + * for kernel implementations and customized C++ kernels. + * If you are looking for a guard to run a workload in inference mode, please use + * the c10::InferenceMode RAII guard, which is the user-facing API. + * In the past, AutoDispatchBelowAutograd (or its old version AutoNonVariableTypeMode) + * was used in user code for inference-only workloads, and this risked + * silently producing wrong results in some edge cases.
For example: + * ``` + * torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true); + * torch::Tensor out = s * s; + * { + * at::AutoDispatchBelowAutograd guard; + * s.add_(1); // Skips version bump on `s`. + * } + * // WRONG GRADIENT! s.grad() is now computed using the value of `s` after the + * // in-place update. + * out.backward(torch::ones_like(out)); + * ``` + * Users should use `c10::InferenceMode` here so that it'll properly throw an + * error saying "one of the variables needed for gradient computation has been modified." */ +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AutoDispatchBelowAutograd extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AutoDispatchBelowAutograd(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public AutoDispatchBelowAutograd(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public AutoDispatchBelowAutograd position(long position) { + return (AutoDispatchBelowAutograd)super.position(position); + } + @Override public AutoDispatchBelowAutograd getPointer(long i) { + return new AutoDispatchBelowAutograd((Pointer)this).offsetAddress(i); + } + + public AutoDispatchBelowAutograd() { super((Pointer)null); allocate(); } + private native void allocate(); + + // disable all autograd dispatch keys +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java new file mode 100644 index 00000000000..9db8cfd63a9 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AutoDispatchSkipFunctionalize extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AutoDispatchSkipFunctionalize(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}.
*/ + public AutoDispatchSkipFunctionalize(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public AutoDispatchSkipFunctionalize position(long position) { + return (AutoDispatchSkipFunctionalize)super.position(position); + } + @Override public AutoDispatchSkipFunctionalize getPointer(long i) { + return new AutoDispatchSkipFunctionalize((Pointer)this).offsetAddress(i); + } + + public AutoDispatchSkipFunctionalize() { super((Pointer)null); allocate(); } + private native void allocate(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java index 8c49ec197fa..4bb0b0312d1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java index 6f0ab8dfa4e..65da8586481 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java new file mode 100644 index 00000000000..403cdb89dca --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java @@ -0,0 +1,43 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// TODO: AutoNonVariableTypeMode should be removed in release 1.10. +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AutoNonVariableTypeMode extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AutoNonVariableTypeMode(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
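+ * <p>A hedged migration sketch, assuming the preset also maps {@code c10::InferenceMode} (the user-facing replacement recommended above) and that tensors {@code a} and {@code b} exist: + * <pre>{@code + * // before: AutoNonVariableTypeMode guard = new AutoNonVariableTypeMode(true); + * try (InferenceMode guard = new InferenceMode()) { + *     Tensor out = torch.matmul(a, b); // inference-only work, no autograd tracking + * } + * }</pre>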
*/ + public AutoNonVariableTypeMode(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public AutoNonVariableTypeMode position(long position) { + return (AutoNonVariableTypeMode)super.position(position); + } + @Override public AutoNonVariableTypeMode getPointer(long i) { + return new AutoNonVariableTypeMode((Pointer)this).offsetAddress(i); + } + + public AutoNonVariableTypeMode(@Cast("bool") boolean enabled/*=true*/) { super((Pointer)null); allocate(enabled); } + private native void allocate(@Cast("bool") boolean enabled/*=true*/); + public AutoNonVariableTypeMode() { super((Pointer)null); allocate(); } + private native void allocate(); + + // disable all autograd dispatch keys +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java index ac53e769f19..de3e38244f1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,15 +46,15 @@ public class AutogradContext extends Pointer { /** Saves the list of variables for a future call to {@code backward}. This * should be called at most once from inside of {@code forward}. */ - public native void save_for_backward(@Cast({"", "std::vector"}) @StdMove TensorVector to_save); + public native void save_for_backward(@Cast({"", "std::vector"}) @StdMove TensorVector to_save); /** Marks variables in the list as modified in an in-place operation. This * should be called at most once from inside of {@code forward} and all arguments * should be inputs. */ - public native void mark_dirty(@Cast({"", "std::vector"}) @StdMove TensorVector inputs); + public native void mark_dirty(@Cast({"", "std::vector"}) @StdMove TensorVector inputs); /** Marks outputs in the list as not requiring gradients. This should be * called at most once from inside of {@code forward} and all arguments should be * outputs. */ - public native void mark_non_differentiable(@Cast({"", "std::vector"}) @StdMove TensorVector outputs); + public native void mark_non_differentiable(@Cast({"", "std::vector"}) @StdMove TensorVector outputs); // Sets whether undefined output grad tensors should be expanded to tensors // full of zeros before calling backward function. Default value is true. public native void set_materialize_grads(@Cast("bool") boolean value); @@ -60,7 +62,7 @@ public class AutogradContext extends Pointer { /** Get the list of variables that were saved in {@code forward} using * {@code save_for_backward()}. Before returning them to the user, a check is made * to ensure that they were not modified by any in-place operations. 
*/ - public native @Cast({"", "std::vector"}) @StdMove TensorVector get_saved_variables(); + public native @Cast({"", "std::vector"}) @StdMove TensorVector get_saved_variables(); public native @Const @ByRef TensorImplSet get_and_bump_dirty(); public native @Const @ByRef TensorImplSet get_non_differentiable(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java index d368d5e22be..73c4c4f626e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java index 3527738d123..fc09f333c5c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java index 39ccb9aa879..57eb70b0cd3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java index 0d2942cd0d6..74fb293dd69 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java new file mode 100644 index 00000000000..3fd2ecff942 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java @@ -0,0 +1,62 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// Structure used to pack all the thread-local boolean +// flags used by autograd +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AutogradState extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AutogradState(Pointer p) { super(p); } + + public static native @ByRef AutogradState get_tls_state(); + public static native void set_tls_state(@ByVal AutogradState state); + + public AutogradState( + @Cast("bool") boolean grad_mode, + @Cast("bool") boolean inference_mode, + @Cast("bool") boolean fw_grad_mode, + @Cast("bool") boolean multithreading_enabled) { super((Pointer)null); allocate(grad_mode, inference_mode, fw_grad_mode, multithreading_enabled); } + private native void allocate( + @Cast("bool") boolean grad_mode, + @Cast("bool") boolean inference_mode, + @Cast("bool") boolean fw_grad_mode, + @Cast("bool") boolean multithreading_enabled); + + public native void set_grad_mode(@Cast("bool") boolean enabled); + + public native void set_fw_grad_mode(@Cast("bool") boolean enabled); + + public native void set_inference_mode(@Cast("bool") boolean enabled); + + public native void set_multithreading_enabled(@Cast("bool") boolean multithreading_enabled); + + public native void set_view_replay_enabled(@Cast("bool") boolean view_replay_enabled); + + public native @Cast("bool") boolean get_grad_mode(); + + public native @Cast("bool") boolean get_fw_grad_mode(); + + public native @Cast("bool") boolean get_inference_mode(); + + public native @Cast("bool") boolean get_multithreading_enabled(); + + public native @Cast("bool") boolean get_view_replay_enabled(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1d.java deleted file mode 100644 index 03d406d7cc9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code AvgPool1dImpl}.
- * See the documentation for {@code AvgPool1dImpl} class to learn what methods it - * provides, and examples of how to use {@code AvgPool1d} with - * {@code torch::nn::AvgPool1dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AvgPool1d extends AvgPool1dImplModuleHolder { - static { Loader.load(); } - - public AvgPool1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AvgPool1d(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AvgPool1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java index 01664065196..075fa250ec1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class AvgPool1dImpl extends AvgPool1dImplBase { public AvgPool1dImpl(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public AvgPool1dImpl(@Const @ByRef AvgPool1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef AvgPool1dOptions options_); + private native void allocate(@Const @ByRef AvgPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
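 * <p>With the {@code AvgPool1d} holder removed by this patch, the Impl class is constructed directly; a minimal sketch using the kernel-size constructor declared below ({@code x} is an assumed input tensor, and kernel size 3 is arbitrary): * <pre>{@code * LongPointer kernel = new LongPointer(1).put(0, 3); // ExpandingArray<1>{3} * AvgPool1dImpl pool = new AvgPool1dImpl(kernel); * Tensor y = pool.forward(x); * }</pre>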
*/ public AvgPool1dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java index 4d7feec3118..3aa9edf6487 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -24,9 +26,9 @@ public class AvgPool1dImplBase extends AvgPool1dImplCloneable { public AvgPool1dImplBase(Pointer p) { super(p); } public AvgPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public AvgPool1dImplBase(@Const @ByRef AvgPool1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef AvgPool1dOptions options_); + private native void allocate(@Const @ByRef AvgPool1dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java index d17bf6e3ebb..6f2000eab8e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class AvgPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(AvgPool1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplModuleHolder.java deleted file mode 100644 index b145ab0ad38..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AvgPool1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AvgPool1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public AvgPool1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public AvgPool1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") AvgPool1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") AvgPool1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native AvgPool1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java index a3aea549a54..df0a0124757 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2d.java deleted file mode 100644 index d33f9613db8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code AvgPool2dImpl}. - * See the documentation for {@code AvgPool2dImpl} class to learn what methods it - * provides, and examples of how to use {@code AvgPool2d} with - * {@code torch::nn::AvgPool2dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AvgPool2d extends AvgPool2dImplModuleHolder { - static { Loader.load(); } - - public AvgPool2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AvgPool2d(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public AvgPool2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java index 6bbc2c6ff70..a5737361d37 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class AvgPool2dImpl extends AvgPool2dImplBase { public AvgPool2dImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public AvgPool2dImpl(@Const @ByRef AvgPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef AvgPool2dOptions options_); + private native void allocate(@Const @ByRef AvgPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool2dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java index a1ba97038e6..0b3199593a0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class AvgPool2dImplBase extends AvgPool2dImplCloneable { public AvgPool2dImplBase(Pointer p) { super(p); } public AvgPool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public AvgPool2dImplBase(@Const @ByRef AvgPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef AvgPool2dOptions options_); + private native void allocate(@Const @ByRef AvgPool2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java index afe3452c37d..553918506c9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class AvgPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(AvgPool2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplModuleHolder.java deleted file mode 100644 index 7f3a8abea78..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AvgPool2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AvgPool2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. 
Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public AvgPool2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public AvgPool2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") AvgPool2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") AvgPool2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native AvgPool2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java index be9fedabfed..f3853e8a3a7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3d.java deleted file mode 100644 index a71229722e4..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code AvgPool3dImpl}. - * See the documentation for {@code AvgPool3dImpl} class to learn what methods it - * provides, and examples of how to use {@code AvgPool3d} with - * {@code torch::nn::AvgPool3dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AvgPool3d extends AvgPool3dImplModuleHolder { - static { Loader.load(); } - - public AvgPool3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public AvgPool3d(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public AvgPool3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java index dd2355d4347..91734e0ec15 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class AvgPool3dImpl extends AvgPool3dImplBase { public AvgPool3dImpl(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public AvgPool3dImpl(@Const @ByRef AvgPool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef AvgPool3dOptions options_); + private native void allocate(@Const @ByRef AvgPool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool3dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java index e0b003cb91d..cad6f05eaf7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class AvgPool3dImplBase extends AvgPool3dImplCloneable { public AvgPool3dImplBase(Pointer p) { super(p); } public AvgPool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public AvgPool3dImplBase(@Const @ByRef AvgPool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef AvgPool3dOptions options_); + private native void allocate(@Const @ByRef AvgPool3dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java index 87012e921a7..12bbab6da9a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class AvgPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public AvgPool3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr AvgPool3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(AvgPool3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplModuleHolder.java deleted file mode 100644 index 56c1e12b0f6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class AvgPool3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public AvgPool3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. 
Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public AvgPool3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public AvgPool3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") AvgPool3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") AvgPool3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native AvgPool3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java index 0b7726d2984..41e82956ebb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java index 0a6ec34c693..4517f9b16b2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,10 +17,35 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("c10::ivalue") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) + +@Name("c10::ivalue::Await") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Await extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public Await() { super((Pointer)null); } + static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Await(Pointer p) { super(p); } + + + + + + + public native @ByVal @Name("wait") IValue _wait(); + + public native @ByVal IValue value(); + + public native void setFn(@ByVal IValueSupplier fn); + + public native @Cast("bool") boolean completed(); + + public native void markCompleted(@ByVal IValue value); + + + + public native @ByVal Type.TypePtr elementType(); + + public native @ByVal Type.TypePtr type(); + + public native void setArgs(@ByVal IValueVector args); + + public native @ByRef IValueVector args(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java new file mode 100644 index 00000000000..cc3f3bd94c0 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class AwaitPtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public AwaitPtr(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public AwaitPtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public AwaitPtr position(long position) { + return (AwaitPtr)super.position(position); + } + @Override public AwaitPtr getPointer(long i) { + return new AwaitPtr((Pointer)this).offsetAddress(i); + } + + + public AwaitPtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public AwaitPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. 
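The rewritten Await bindings above expose the methods of c10::ivalue::Await directly to Java. What follows is a minimal sketch of driving them, assuming an Await instance obtained from the TorchScript runtime (this diff binds no factory for it); only methods shown in the hunk above are used:

    import org.bytedeco.pytorch.*;

    public class AwaitSketch {
        // Sketch only: `aw` is assumed to come from the runtime.
        static IValue resolve(Await aw, IValue result) {
            if (!aw.completed()) {
                aw.markCompleted(result); // mark the awaitable done with the given value
            }
            return aw._wait();            // maps c10::ivalue::Await::wait(), renamed for Java
        }
    }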
+ // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public AwaitPtr(Await target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(Await target, @ByVal DontIncreaseRefcount arg1); + + + + public AwaitPtr(@ByRef(true) AwaitPtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) AwaitPtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) AwaitPtr put(@ByRef(true) AwaitPtr rhs); + + public native @NoException(true) Await get(); + + public native @ByRef @Name("operator *") @NoException(true) Await multiply(); + + public native @Name("operator ->") @NoException(true) Await access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef AwaitPtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into an intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) Await release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counterpart to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal AwaitPtr reclaim(Await owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal AwaitPtr reclaim_copy(Await owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside an intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...)) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal AwaitPtr unsafe_steal_from_new(Await raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get.
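The release()/reclaim() pair documented above round-trips ownership through a raw pointer without changing the refcount, which is the intended pattern for crossing C-style boundaries. A hedged sketch of that contract, assuming a live AwaitPtr passed in:

    import org.bytedeco.pytorch.*;

    public class ReclaimSketch {
        // Sketch of the ownership round trip; `ptr` must be live and is invalid afterwards.
        static AwaitPtr roundTrip(AwaitPtr ptr) {
            Await raw = ptr.release();    // refcount untouched; ptr no longer owns the object
            // ... the raw pointer could be handed through a C API here ...
            return AwaitPtr.reclaim(raw); // must be reclaimed exactly once to destruct properly
        }
    }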
In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal AwaitPtr unsafe_adapt_non_heap_allocated( + Await raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal AwaitPtr unsafe_reclaim_from_nonowning(Await raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java index 0e6cb371e8c..37fdbc02537 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java index a0629b800cb..97c5311afb5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELoss.java deleted file mode 100644 index b9c25be99f4..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code BCELossImpl}. - * See the documentation for {@code BCELossImpl} class to learn what methods it - * provides, and examples of how to use {@code BCELoss} with - * {@code torch::nn::BCELossOptions}. 
See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BCELoss extends BCELossImplModuleHolder { - static { Loader.load(); } - - public BCELoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public BCELoss(@SharedPtr @Cast({"", "std::shared_ptr"}) BCELossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BCELossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BCELoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java index 3239b385697..564e886f125 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,9 +48,9 @@ public class BCELossImpl extends BCELossImplCloneable { } public BCELossImpl(@ByVal(nullValue = "torch::nn::BCELossOptions{}") BCELossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::BCELossOptions{}") BCELossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::BCELossOptions{}") BCELossOptions options_); public BCELossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java index 42d3e0ceb46..b2f1b634de4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class BCELossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
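The deletion of BCELoss above is part of this patch's removal of the generated ModuleHolder wrappers; callers now construct the Impl classes directly. A sketch under that assumption, using only the constructors and methods shown in the BCELossImpl hunk above:

    import org.bytedeco.pytorch.*;

    public class DirectImplSketch {
        public static void main(String[] args) {
            // Default-constructed loss module; replaces going through the deleted BCELoss holder.
            BCELossImpl criterion = new BCELossImpl();
            criterion.reset(); // reinitializes parameters/buffers, per the reset() contract above
        }
    }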
*/ public BCELossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BCELossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(BCELossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplModuleHolder.java deleted file mode 100644 index 742350278c7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BCELossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BCELossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public BCELossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public BCELossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) BCELossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BCELossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") BCELossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") BCELossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) BCELossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native BCELossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java index c21b721b50e..e0e2814b149 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -43,5 +45,5 @@ public class BCELossOptions extends Pointer { } public native @ByRef @NoException(true) Tensor weight(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLoss.java deleted file mode 100644 index 69747733079..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code BCEWithLogitsLossImpl}. 
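In the BCELossOptions hunk above, the bound type of the reduction accessor is renamed from loss_reduction_t to LossReduction. A sketch of reading it follows; the default constructor and anything about LossReduction beyond its name are assumptions here, not taken from the diff:

    import org.bytedeco.pytorch.*;

    public class ReductionSketch {
        public static void main(String[] args) {
            BCELossOptions opts = new BCELossOptions(); // assumed default constructor
            LossReduction red = opts.reduction();       // renamed return type from this hunk
        }
    }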
- * See the documentation for {@code BCEWithLogitsLossImpl} class to learn what - * methods it provides, and examples of how to use {@code BCEWithLogitsLoss} with - * {@code torch::nn::BCEWithLogitsLossOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BCEWithLogitsLoss extends BCEWithLogitsLossImplModuleHolder { - static { Loader.load(); } - - public BCEWithLogitsLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public BCEWithLogitsLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BCEWithLogitsLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java index 6dd9315d245..1b2a45761b4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -50,9 +52,9 @@ public class BCEWithLogitsLossImpl extends BCEWithLogitsLossImplCloneable { } public BCEWithLogitsLossImpl(@ByVal(nullValue = "torch::nn::BCEWithLogitsLossOptions{}") BCEWithLogitsLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::BCEWithLogitsLossOptions{}") BCEWithLogitsLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::BCEWithLogitsLossOptions{}") BCEWithLogitsLossOptions options_); public BCEWithLogitsLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java index 97ea46046c4..a29e1ec90b9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class 
BCEWithLogitsLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BCEWithLogitsLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BCEWithLogitsLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(BCEWithLogitsLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplModuleHolder.java deleted file mode 100644 index 546b05ca7ab..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BCEWithLogitsLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BCEWithLogitsLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public BCEWithLogitsLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. 
- * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public BCEWithLogitsLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") BCEWithLogitsLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") BCEWithLogitsLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native BCEWithLogitsLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java index 0a57c955684..5054412f6ff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,6 +46,6 @@ public class BCEWithLogitsLossOptions extends Pointer { } public native @ByRef @NoException(true) Tensor weight(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); public native @ByRef @NoException(true) Tensor pos_weight(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java index 18477b76c9d..bfe0791baeb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java index 585e4081139..0407e457a82 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class BFloat16ArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public BFloat16ArrayRef(@Const @ByRef BFloat16 OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef BFloat16 OneElt); + /** Construct an ArrayRef from a pointer and length. */ public BFloat16ArrayRef(@Const BFloat16 data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -70,13 +71,13 @@ public class BFloat16ArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::t)>::iterator*") ShortPointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::t)>::iterator*") ShortPointer end(); + public native @Const @ByPtr BFloat16 begin(); + public native @Const @ByPtr BFloat16 end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::t)>::const_iterator*") ShortPointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::t)>::const_iterator*") ShortPointer cend(); + public native @Const @ByPtr BFloat16 cbegin(); + public native @Const @ByPtr BFloat16 cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1d.java deleted file mode 100644 index bb4af50eebc..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code BatchNorm1dImpl}. - * See the documentation for {@code BatchNorm1dImpl} class to learn what methods it - * provides, and examples of how to use {@code BatchNorm1d} with - * {@code torch::nn::BatchNorm1dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. 
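The BFloat16ArrayRef hunk above retypes the iterators from raw ShortPointer casts to typed BFloat16 pointers, so plain pointer arithmetic works on the range. A sketch using only methods shown in that hunk plus base JavaCPP Pointer methods:

    import org.bytedeco.pytorch.*;

    public class ArrayRefSketch {
        // Sketch: counts elements from the typed begin()/end() pointers.
        static long count(BFloat16ArrayRef ref) {
            if (ref.empty()) return 0;   // empty() is bound in the hunk above
            BFloat16 it = ref.begin(), end = ref.end();
            return (end.address() - it.address()) / it.sizeof(); // element-size arithmetic
        }
    }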
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BatchNorm1d extends BatchNorm1dImplModuleHolder { - static { Loader.load(); } - - public BatchNorm1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public BatchNorm1d(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BatchNorm1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java index cc1e4e834ef..0f3e1f86659 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java index 8518addd163..29e94e51d8d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java index 5940ccde09d..98ce98583e1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java index 674b7ab8e3f..36dbb74cf43 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by 
JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class BatchNorm1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(BatchNorm1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplModuleHolder.java deleted file mode 100644 index ac5fdeb0994..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BatchNorm1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BatchNorm1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. 
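The BatchNorm1dImplCloneable hunk above repeats the patch-wide pattern: asModule() becomes a shared_ptr static_pointer_cast rather than a raw static_cast, and clone() now returns its shared pointer by value. A sketch of the call side, assuming a BatchNorm1dImpl constructed elsewhere (its constructors are outside this excerpt):

    import org.bytedeco.pytorch.*;

    public class CloneSketch {
        // Sketch: `bn` is assumed constructed elsewhere; only methods from this hunk are used.
        static Module deepCopy(BatchNorm1dImpl bn) {
            return bn.clone();    // recursive deep copy; parameters and buffers are duplicated
        }
        static Module upcast(BatchNorm1dImpl bn) {
            return bn.asModule(); // shared-ownership cast to torch::nn::Module
        }
    }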
*/ - /* implicit */ public BatchNorm1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public BatchNorm1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") BatchNorm1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") BatchNorm1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native BatchNorm1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2d.java deleted file mode 100644 index 30e7b0914d6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code BatchNorm2dImpl}. - * See the documentation for {@code BatchNorm2dImpl} class to learn what methods it - * provides, and examples of how to use {@code BatchNorm2d} with - * {@code torch::nn::BatchNorm2dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BatchNorm2d extends BatchNorm2dImplModuleHolder { - static { Loader.load(); } - - public BatchNorm2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public BatchNorm2d(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BatchNorm2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java index 93b4b05018f..ba82168585f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java index 4cabded90f8..c6db5f8cb50 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java index d0c0c5af515..29a90b8a370 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java index 33f1c336cc2..fdc95618d6a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by 
JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class BatchNorm2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(BatchNorm2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplModuleHolder.java deleted file mode 100644 index 2ce49895864..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BatchNorm2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BatchNorm2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. 
*/ - /* implicit */ public BatchNorm2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public BatchNorm2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") BatchNorm2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") BatchNorm2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native BatchNorm2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3d.java deleted file mode 100644 index 388890f9e89..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code BatchNorm3dImpl}. - * See the documentation for {@code BatchNorm3dImpl} class to learn what methods it - * provides, and examples of how to use {@code BatchNorm3d} with - * {@code torch::nn::BatchNorm3dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BatchNorm3d extends BatchNorm3dImplModuleHolder { - static { Loader.load(); } - - public BatchNorm3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public BatchNorm3d(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BatchNorm3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java index 3eb3430ac70..6a41698852a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java index 751346e000a..723f6ad05b9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java index 540fc322cb1..a3e840c0a78 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java index 054c3800751..8179041964b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by 
JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class BatchNorm3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BatchNorm3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BatchNorm3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(BatchNorm3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplModuleHolder.java deleted file mode 100644 index 47a9b10b8f0..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BatchNorm3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BatchNorm3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. 
*/ - /* implicit */ public BatchNorm3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public BatchNorm3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") BatchNorm3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") BatchNorm3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native BatchNorm3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java index 38963ad9776..869ec8924f5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java index be4a662a15a..9bd2a38763e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java index 0da384875df..9806e2ca148 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java index 76672be3f49..fc5ed3b7a0c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class BatchSizeOptional extends Pointer { public native @Name("operator =") @ByRef BatchSizeOptional put(@ByRef BatchSizeOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef BatchSize get(); @ValueSetter public native 
BatchSizeOptional put(@ByRef BatchSize value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java index 8f9be04cd74..678d4a71ce0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Bilinear.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Bilinear.java deleted file mode 100644 index fae3b83dd4f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Bilinear.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code BilinearImpl}. - * See the documentation for {@code BilinearImpl} class to learn what methods it - * provides, and examples of how to use {@code Bilinear} with - * {@code torch::nn::BilinearOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Bilinear extends BilinearImplModuleHolder { - static { Loader.load(); } - - public Bilinear(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Bilinear(@SharedPtr @Cast({"", "std::shared_ptr"}) BilinearImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BilinearImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
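BatchSizeOptional above, like the other generated optionals in this patch (BoolOptional, BoolVectorOptional), gains a reset() binding next to has_value() and get(). A minimal sketch of the c10::optional-style lifecycle, written as a helper so construction details stay out of scope:

    static void drain(BatchSizeOptional opt) {
        if (opt.has_value()) {
            BatchSize bs = opt.get();  // bound to c10::optional::value()
        }
        opt.reset();                   // newly bound: empties the optional
        // has_value() is now false; get() would throw
    }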
*/ - public Bilinear(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java index 8c9a2c63351..29d7981280f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class BilinearImpl extends BilinearImplCloneable { public BilinearImpl(Pointer p) { super(p); } public BilinearImpl(@Cast("int64_t") long in1_features, @Cast("int64_t") long in2_features, @Cast("int64_t") long out_features) { super((Pointer)null); allocate(in1_features, in2_features, out_features); } - @NoDeallocator private native void allocate(@Cast("int64_t") long in1_features, @Cast("int64_t") long in2_features, @Cast("int64_t") long out_features); + @SharedPtr private native void allocate(@Cast("int64_t") long in1_features, @Cast("int64_t") long in2_features, @Cast("int64_t") long out_features); public BilinearImpl(@Const @ByRef BilinearOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef BilinearOptions options_); + @SharedPtr private native void allocate(@Const @ByRef BilinearOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java index ec8435fac04..7f7e0c35d7f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class BilinearImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BilinearImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr BilinearImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(BilinearImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
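The BilinearImpl allocators above switch from @NoDeallocator to @SharedPtr, so modules constructed from Java are owned through a shared_ptr from the start. A sketch using the two constructors in this hunk (the BilinearOptions(long, long, long) constructor is an assumption carried over from the C++ API):

    BilinearImpl m = new BilinearImpl(20, 30, 40);  // in1_features, in2_features, out_features
    BilinearImpl m2 = new BilinearImpl(new BilinearOptions(20, 30, 40));  // assumed options ctor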
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplModuleHolder.java deleted file mode 100644 index f834f848393..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BilinearImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BilinearImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public BilinearImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public BilinearImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) BilinearImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) BilinearImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") BilinearImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") BilinearImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) BilinearImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native BilinearImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java index 28bf34b00f4..27079166355 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java index a1e95280bb9..029638f1455 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,9 +25,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class BinOp extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public BinOp(Pointer p) { super(p); } - public BinOp(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public BinOp(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr lhs(); public native @ByVal Expr rhs(); public static native @ByVal BinOp create( diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java index 6885bb76b9a..c835d4f6046 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java index 90ca54a9b9c..b38e25173ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java index 4751c98b838..8582dede07d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,19 +22,38 @@ public class BlockArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BlockArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public BlockArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public BlockArrayRef position(long position) { + return (BlockArrayRef)super.position(position); + } + @Override public BlockArrayRef getPointer(long i) { + return new BlockArrayRef((Pointer)this).offsetAddress(i); + } /** \name Constructors * \{

* Construct an empty ArrayRef. */ - /* implicit */ + /* implicit */ public BlockArrayRef() { super((Pointer)null); allocate(); } +private native void allocate(); /** Construct an ArrayRef from a single element. */ // TODO Make this explicit + /** Construct an ArrayRef from a pointer and length. */ + public BlockArrayRef(@Cast("torch::jit::Block**") PointerPointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Cast("torch::jit::Block**") PointerPointer data, @Cast("size_t") long length); + public BlockArrayRef(@ByPtrPtr Block data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@ByPtrPtr Block data, @Cast("size_t") long length); /** Construct an ArrayRef from a range. */ + public BlockArrayRef(@Cast("torch::jit::Block**") PointerPointer begin, @Cast("torch::jit::Block**") PointerPointer end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Cast("torch::jit::Block**") PointerPointer begin, @Cast("torch::jit::Block**") PointerPointer end); + public BlockArrayRef(@ByPtrPtr Block begin, @ByPtrPtr Block end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@ByPtrPtr Block begin, @ByPtrPtr Block end); /** Construct an ArrayRef from a SmallVector. This is templated in order to * avoid instantiating SmallVectorTemplateCommon whenever we @@ -54,18 +75,18 @@ public class BlockArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Block begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Block end(); + public native @Const PointerPointer begin(); + public native @Const PointerPointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Block cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Block cend(); + public native @Const PointerPointer cbegin(); + public native @Const PointerPointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); - public native @Cast("const torch::jit::Block**") PointerPointer data(); + public native @Cast("torch::jit::Block**") PointerPointer data(); /** size - Get the array size. 
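BlockArrayRef now exposes working constructors and PointerPointer-based begin()/end() in place of the old iterator casts. A sketch of wrapping two existing Block pointers (b0 and b1 are assumed to come from a torch::jit graph, which this patch does not show):

    PointerPointer<Block> data = new PointerPointer<Block>(2).put(0, b0).put(1, b1);
    BlockArrayRef ref = new BlockArrayRef(data, 2);  // pointer + length constructor
    long n = ref.size();                             // 2
    boolean none = ref.empty();                      // false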
*/ public native @Cast("const size_t") long size(); @@ -110,7 +131,7 @@ public class BlockArrayRef extends Pointer { /** \} * \name Expensive Operations * \{ */ - public native @ByVal BlockVector vec(); + public native @Cast("torch::jit::Block**") @StdVector PointerPointer vec(); /** \} */ } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockVector.java deleted file mode 100644 index 4557ce46c65..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockVector.java +++ /dev/null @@ -1,86 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BlockVector extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BlockVector(Pointer p) { super(p); } - public BlockVector(Block value) { this(1); put(0, value); } - public BlockVector(Block ... array) { this(array.length); put(array); } - public BlockVector() { allocate(); } - public BlockVector(long n) { allocate(n); } - private native void allocate(); - private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef BlockVector put(@ByRef BlockVector x); - - public boolean empty() { return size() == 0; } - public native long size(); - public void clear() { resize(0); } - public native void resize(@Cast("size_t") long n); - - @Index(function = "at") public native Block get(@Cast("size_t") long i); - public native BlockVector put(@Cast("size_t") long i, Block value); - - public native @ByVal Iterator insert(@ByVal Iterator pos, Block value); - public native @ByVal Iterator erase(@ByVal Iterator pos); - public native @ByVal Iterator begin(); - public native @ByVal Iterator end(); - @NoOffset @Name("iterator") public static class Iterator extends Pointer { - public Iterator(Pointer p) { super(p); } - public Iterator() { } - - public native @Name("operator ++") @ByRef Iterator increment(); - public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @Const Block get(); - } - - public Block[] get() { - Block[] array = new Block[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE]; - for (int i = 0; i < array.length; i++) { - array[i] = get(i); - } - return array; - } - @Override public String toString() { - return java.util.Arrays.toString(get()); - } - - public Block pop_back() { - long size = size(); - Block value = get(size - 1); - resize(size - 1); - return value; - } - public BlockVector push_back(Block value) { - long size = size(); - resize(size + 1); - return put(size, value); - } - public BlockVector put(Block value) { - if (size() != 1) { resize(1); } - return put(0, value); - } - public BlockVector put(Block ... 
array) { - if (size() != array.length) { resize(array.length); } - for (int i = 0; i < array.length; i++) { - put(i, array[i]); - } - return this; - } -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java index 115b7eff53a..1320b1b5311 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java index 639378f65e2..1cd991dd051 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Bool2Vector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class Bool2Vector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public BoolPointer front() { return get(0); } + public BoolPointer back() { return get(size() - 1); } @Index(function = "at") public native @Cast("std::array*") @ByRef BoolPointer get(@Cast("size_t") long i); public native Bool2Vector put(@Cast("size_t") long i, BoolPointer value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java index 2942eb6dcdb..32268b0e1cb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("c10::ArrayRef::t)>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class BoolArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -39,16 +41,19 @@ public class BoolArrayRef extends Pointer { /** Construct an ArrayRef from a single element. 
*/ // TODO Make this explicit - public BoolArrayRef(@Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean OneElt); + /** Construct an ArrayRef from a pointer and length. */ - public BoolArrayRef(@Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)*") BoolPointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } - private native void allocate(@Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)*") BoolPointer data, @Cast("size_t") long length); + public BoolArrayRef(@Cast("const bool*") BoolPointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Cast("const bool*") BoolPointer data, @Cast("size_t") long length); + public BoolArrayRef(@Cast("const bool*") boolean[] data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Cast("const bool*") boolean[] data, @Cast("size_t") long length); /** Construct an ArrayRef from a range. */ - public BoolArrayRef(@Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)*") BoolPointer begin, @Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)*") BoolPointer end) { super((Pointer)null); allocate(begin, end); } - private native void allocate(@Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)*") BoolPointer begin, @Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)*") BoolPointer end); + public BoolArrayRef(@Cast("const bool*") BoolPointer begin, @Cast("const bool*") BoolPointer end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Cast("const bool*") BoolPointer begin, @Cast("const bool*") BoolPointer end); + public BoolArrayRef(@Cast("const bool*") boolean[] begin, @Cast("const bool*") boolean[] end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Cast("const bool*") boolean[] begin, @Cast("const bool*") boolean[] end); /** Construct an ArrayRef from a SmallVector. This is templated in order to * avoid instantiating SmallVectorTemplateCommon whenever we @@ -70,27 +75,27 @@ public class BoolArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::t)>::iterator*") BoolPointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::t)>::iterator*") BoolPointer end(); + public native @Const BoolPointer begin(); + public native @Const BoolPointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::t)>::const_iterator*") BoolPointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::t)>::const_iterator*") BoolPointer cend(); + public native @Const BoolPointer cbegin(); + public native @Const BoolPointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); - public native @Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)*") BoolPointer data(); + public native @Cast("const bool*") BoolPointer data(); /** size - Get the array size. 
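With the decltype(ScalarTypeToCPPType<...>) casts above collapsed to plain const bool*, BoolArrayRef also accepts a Java boolean[] directly. A small sketch using only methods visible in this patch:

    boolean[] flags = {true, false, true};
    BoolArrayRef ref = new BoolArrayRef(flags, flags.length);
    boolean first = ref.front();   // true
    boolean last = ref.back();     // true
    boolean mid = ref.get(1);      // false, via operator[]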
*/ public native @Cast("const size_t") long size(); /** front - Get the first element. */ - public native @Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean front(); + public native @Cast("const bool") boolean front(); /** back - Get the last element. */ - public native @Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean back(); + public native @Cast("const bool") boolean back(); /** equals - Check for element-wise equality. */ public native @Cast("const bool") boolean equals(@ByVal BoolArrayRef RHS); @@ -104,12 +109,12 @@ public class BoolArrayRef extends Pointer { /** \} * \name Operator Overloads * \{ */ - public native @Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") @Name("operator []") boolean get(@Cast("size_t") long Index); + public native @Cast("const bool") @Name("operator []") boolean get(@Cast("size_t") long Index); /** Vector compatibility */ /// - public native @Cast("const decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean at(@Cast("size_t") long Index); + public native @Cast("const bool") boolean at(@Cast("size_t") long Index); /** Disallow accidental assignment from a temporary. * @@ -126,7 +131,7 @@ public class BoolArrayRef extends Pointer { /** \} * \name Expensive Operations * \{ */ - public native @Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)*") @StdVector BoolPointer vec(); + public native @ByVal BoolVector vec(); /** \} */ } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java index 0ad4e259faf..f848528c33b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class BoolOptional extends Pointer { public native @Name("operator =") @ByRef BoolOptional put(@ByRef BoolOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @Cast("bool") boolean get(); @ValueSetter public native BoolOptional put(@Cast("bool") boolean value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java index eae97e4234b..f138b68c4ca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java index 70bd3be06aa..59208d75345 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java index bb7933e53b0..9a9ea1e92ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class BoolVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public boolean front() { return get(0); } + public boolean back() { return get(size() - 1); } @Index(function = "at") public native @Cast("bool") boolean get(@Cast("size_t") long i); public native BoolVector put(@Cast("size_t") long i, boolean value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java index 292d6c21e88..62ff2250e1a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class BoolVectorOptional extends Pointer { public native @Name("operator =") @ByRef BoolVectorOptional put(@ByRef BoolVectorOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef BoolVector get(); @ValueSetter public native BoolVectorOptional put(@ByRef BoolVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java new file mode 100644 index 00000000000..b194b3416ad --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::impl::ListElementReference") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class BooleanElementReference extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public BooleanElementReference(Pointer p) { super(p); } + + public native @Name("operator std::conditional_t::type>::value,const bool&,bool>") boolean getBoolean(); + + + + + + // assigning another ref to this assigns the underlying value + + + public native @Const @ByRef IValue get(); + + + + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java new file mode 100644 index 00000000000..c6460fd12e1 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java @@ -0,0 +1,239 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::List") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class BooleanList extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public BooleanList(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public BooleanList(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public BooleanList position(long position) { + return (BooleanList)super.position(position); + } + @Override public BooleanList getPointer(long i) { + return new BooleanList((Pointer)this).offsetAddress(i); + } + + + /** + * Constructs an empty list. + */ + public BooleanList() { super((Pointer)null); allocate(); } + private native void allocate(); + + /** + * Constructs a list with some initial values. + * Example: + * List a({2, 3, 4}); + */ + public BooleanList(@ByVal BoolArrayRef initial_values) { super((Pointer)null); allocate(initial_values); } + private native void allocate(@ByVal BoolArrayRef initial_values); + + /** + * Create a generic list with runtime type information. + * This only works for c10::impl::GenericList and is not part of the public API + * but only supposed to be used internally by PyTorch. 
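BooleanList is a new binding of c10::List<bool>. Combining its BoolArrayRef constructor with the accessors documented just below, creation and reading might look like this sketch:

    BooleanList list = new BooleanList(new BoolArrayRef(new boolean[]{true, false}, 2));
    boolean v = list.get(0);         // bounds-checked; throws std::out_of_range if invalid
    long n = list.size();            // 2
    BooleanList deep = list.copy();  // separate storage: changes do not propagate back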
+ */ + + + public BooleanList(@Const @ByRef BooleanList arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef BooleanList arg0); + public native @ByRef @Name("operator =") BooleanList put(@Const @ByRef BooleanList arg0); + + /** + * Create a new List pointing to a deep copy of the same data. + * The List returned is a new list with separate storage. + * Changes in it are not reflected in the original list or vice versa. + */ + public native @ByVal BooleanList copy(); + + /** + * Returns the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + */ + public native boolean get(long pos); + + /** + * Moves out the element at the specified location pos and returns it, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * The list contains an invalid element at position pos afterwards. Any operations + * on it before re-setting it are invalid. + */ + public native boolean extract(long pos); + + /** + * Returns a reference to the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * + * You cannot store the reference, but you can read it and assign new values to it: + * + * List list = ...; + * list[2] = 5; + * int64_t v = list[1]; + */ + + + + + /** + * Assigns a new value to the element at location pos. + */ + public native void set(long pos, boolean value); + + /** + * Assigns a new value to the element at location pos. + */ + + /** + * Returns an iterator to the first element of the container. + * If the container is empty, the returned iterator will be equal to end(). + */ + public native @ByVal @Cast("c10::List::iterator*") BooleanListIterator begin(); + + /** + * Returns an iterator to the element following the last element of the container. + * This element acts as a placeholder; attempting to access it results in undefined behavior. + */ + public native @ByVal @Cast("c10::List::iterator*") BooleanListIterator end(); + + /** + * Checks if the container has no elements. + */ + public native @Cast("bool") boolean empty(); + + /** + * Returns the number of elements in the container + */ + public native long size(); + + /** + * Increase the capacity of the vector to a value that's greater or equal to new_cap. + */ + public native void reserve(long new_cap); + + /** + * Erases all elements from the container. After this call, size() returns zero. + * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated. + */ + public native void clear(); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") BooleanListIterator insert(@ByVal @Cast("c10::List::iterator*") BooleanListIterator pos, @Cast("const bool") boolean value); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Inserts a new element into the container directly before pos. + * The new element is constructed with the given arguments. 
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void push_back(@Cast("const bool") boolean value); + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given list to the end of the container. Uses at most one memory allocation. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void append(@ByVal BooleanList lst); + + /** + * Appends the given element value to the end of the container. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Removes the element at pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") BooleanListIterator erase(@ByVal @Cast("c10::List::iterator*") BooleanListIterator pos); + + /** + * Removes the elements in the range [first, last). + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") BooleanListIterator erase(@ByVal @Cast("c10::List::iterator*") BooleanListIterator first, @ByVal @Cast("c10::List::iterator*") BooleanListIterator last); + + /** + * Removes the last element of the container. + * Calling pop_back on an empty container is undefined. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void pop_back(); + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional default-inserted elements are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void resize(long count); + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional copies of value are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void resize(long count, @Cast("const bool") boolean value); + + /** + * Value equality comparison. This function implements Python-like semantics for + * equality: two lists with the same identity (e.g. same pointer) trivially + * compare equal, otherwise each element is compared for equality. + */ + + + + + /** + * Identity comparison. Returns true if and only if {@code rhs} represents the same + * List object as {@code this}. 
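A short sketch of the mutating operations documented above, using only methods bound in this file (the values are arbitrary):

    BooleanList list = new BooleanList();
    list.reserve(3);               // capacity hint; append() uses at most one allocation
    list.push_back(true);
    list.push_back(false);
    list.set(0, false);            // assign at position 0
    list.resize(5, true);          // grow to 5 elements, padding with true
    list.pop_back();               // undefined on an empty list, fine here
    boolean same = list.is(list);  // identity comparison: true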
+ */ + public native @Cast("bool") boolean is(@Const @ByRef BooleanList rhs); + + public native @ByVal BoolVector vec(); + + /** + * Returns the number of Lists currently pointing to this same list. + * If this is the only instance pointing to this list, returns 1. + */ + // TODO Test use_count + public native @Cast("size_t") long use_count(); + + public native @ByVal Type.TypePtr elementType(); + + // See [unsafe set type] for why this exists. + public native void unsafeSetElementType(@ByVal Type.TypePtr t); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java new file mode 100644 index 00000000000..5079de15bb2 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java @@ -0,0 +1,84 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::impl::ListIterator") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class BooleanListIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public BooleanListIterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public BooleanListIterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public BooleanListIterator position(long position) { + return (BooleanListIterator)super.position(position); + } + @Override public BooleanListIterator getPointer(long i) { + return new BooleanListIterator((Pointer)this).offsetAddress(i); + } + + // C++17 friendly std::iterator implementation + + public BooleanListIterator() { super((Pointer)null); allocate(); } + private native void allocate(); + + public BooleanListIterator(@Const @ByRef BooleanListIterator arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef BooleanListIterator arg0); + public native @ByRef @Name("operator =") BooleanListIterator put(@Const @ByRef BooleanListIterator arg0); + + public native @ByRef @Name("operator ++") BooleanListIterator increment(); + + public native @ByVal @Name("operator ++") BooleanListIterator increment(int arg0); + + public native @ByRef @Name("operator --") BooleanListIterator decrement(); + + public native @ByVal @Name("operator --") BooleanListIterator decrement(int arg0); + + public native @ByRef @Name("operator +=") BooleanListIterator addPut(long offset); + + public native @ByRef @Name("operator -=") BooleanListIterator subtractPut(long offset); + + public native @ByVal @Name("operator +") BooleanListIterator add(long offset); + + public native @ByVal @Name("operator -") BooleanListIterator subtract(long offset); + + private static native @Namespace @Cast("c10::impl::ListIterator::difference_type") @Name("operator -") long subtract(@Const @ByRef BooleanListIterator lhs, @Const @ByRef BooleanListIterator rhs); + public long subtract(BooleanListIterator rhs) { return subtract(this, rhs); } + + + + + + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef BooleanListIterator lhs, @Const @ByRef BooleanListIterator rhs); + public boolean equals(BooleanListIterator rhs) { return equals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef BooleanListIterator lhs, @Const @ByRef BooleanListIterator rhs); + public boolean notEquals(BooleanListIterator rhs) { return notEquals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef BooleanListIterator lhs, @Const @ByRef BooleanListIterator rhs); + public boolean lessThan(BooleanListIterator rhs) { return lessThan(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef BooleanListIterator lhs, @Const @ByRef BooleanListIterator rhs); + public boolean lessThanEquals(BooleanListIterator rhs) { return lessThanEquals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef BooleanListIterator lhs, @Const @ByRef BooleanListIterator rhs); + public boolean greaterThan(BooleanListIterator rhs) { return greaterThan(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef BooleanListIterator lhs, @Const @ByRef BooleanListIterator rhs); + public boolean greaterThanEquals(BooleanListIterator rhs) { return greaterThanEquals(this, rhs); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java index 3b993fb947b..d12b7b3e2a0 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,8 +21,10 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Break extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Break(Pointer p) { super(p); } - public Break(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Break(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public static native @ByVal Break create(@Const @ByRef SourceRange range); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java index ed9fb054820..d39cb8c0dc1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,7 +39,7 @@ public class BufferPolicy extends Pointer { public static native @ByVal @Cast("torch::jit::detail::BufferPolicy::value_type*") Tensor create( @StdVector SlotCursor cursors, @ByVal IValue v); - public static native @Cast("bool") boolean valid(@Const @SharedPtr @ByRef ClassType typ, @Cast("size_t") long i, @Const @ByRef IValue v); + public static native @Cast("bool") boolean valid(@Const @SharedPtr("c10::ClassType") @ByRef ClassType typ, @Cast("size_t") long i, @Const @ByRef IValue v); @MemberGetter public static native @Cast("const bool") boolean all_slots(); public static final boolean all_slots = all_slots(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java index b1913e0ea34..652d672fccf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -31,12 +33,7 @@ public class BuiltinFunction 
extends SugaredValue { // if this is method, then this is the self argument. public native @ByRef NamedValueOptional self(); public native BuiltinFunction self(NamedValueOptional setter); public native @StdString BytePointer kind(); - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + // try to create this builtin but if it doesn't exist or the self argument // cannot possibly match, then return nullptr. Use in situations where it is diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java index f9533454cfa..c79f7ab1ab7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,11 +34,11 @@ public class BuiltinModule extends SugaredValue { private native void allocate(@StdString String name); public native @StdString BytePointer kind(); - public native @SharedPtr @ByVal SugaredValue attr( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue attr( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @StdString BytePointer field); - public native @SharedPtr @ByVal SugaredValue attr( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue attr( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @StdString String field); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java index fe2d3a2f888..10a6ca2a047 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,17 +17,6 @@ import static org.bytedeco.pytorch.global.torch.*; -/** ArrayRef - Represent a constant reference to an array (0 or more elements - * consecutively in memory), i.e. a start pointer and a length. It allows - * various APIs to take consecutive elements easily and conveniently. - * - * This class does not own the underlying data, it is expected to be used in - * situations where the data resides in some other buffer, whose lifetime - * extends past that of the ArrayRef. For this reason, it is not in general - * safe to store an ArrayRef. - * - * This is intended to be trivially copyable, so it should be passed by - * value. 
*/ @Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ByteArrayRef extends Pointer { static { Loader.load(); } @@ -50,8 +41,7 @@ public class ByteArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public ByteArrayRef(byte OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(byte OneElt); + /** Construct an ArrayRef from a pointer and length. */ public ByteArrayRef(@Const BytePointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -89,13 +79,13 @@ public class ByteArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") BytePointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") BytePointer end(); + public native @Const BytePointer begin(); + public native @Const BytePointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") BytePointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") BytePointer cend(); + public native @Const BytePointer cbegin(); + public native @Const BytePointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); @@ -112,13 +102,13 @@ public class ByteArrayRef extends Pointer { public native byte back(); /** equals - Check for element-wise equality. */ - public native @Cast("const bool") boolean equals(@ByVal @Cast("c10::ArrayRef*") ByteArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal ByteArrayRef RHS); /** slice(n, m) - Take M elements of the array starting at element N */ - public native @ByVal @Cast("const c10::ArrayRef*") ByteArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + public native @Const @ByVal ByteArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array. 
*/ - public native @ByVal @Cast("const c10::ArrayRef*") ByteArrayRef slice(@Cast("size_t") long N); + public native @Const @ByVal ByteArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java index 88c3ed3a2fb..8d5b233bc4b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class ByteOptional extends Pointer { public native @Name("operator =") @ByRef ByteOptional put(@ByRef ByteOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") byte get(); @ValueSetter public native ByteOptional put(byte value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java index 23081188c96..e8ad52bddfd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,6 +37,8 @@ public class BytePointerVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public BytePointer front() { return get(0); } + public BytePointer back() { return get(size() - 1); } @Index(function = "at") public native @Const @Cast("const char*") BytePointer get(@Cast("size_t") long i); public native BytePointerVector put(@Cast("size_t") long i, BytePointer value); @ValueSetter @Index(function = "at") public native BytePointerVector put(@Cast("size_t") long i, @Const String value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BytecodeEmitMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BytecodeEmitMode.java deleted file mode 100644 index ee4f565d4d8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BytecodeEmitMode.java +++ /dev/null @@ -1,45 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @Properties(inherit = 
org.bytedeco.pytorch.presets.torch.class) -public class BytecodeEmitMode extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public BytecodeEmitMode() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public BytecodeEmitMode(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public BytecodeEmitMode(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public BytecodeEmitMode position(long position) { - return (BytecodeEmitMode)super.position(position); - } - @Override public BytecodeEmitMode getPointer(long i) { - return new BytecodeEmitMode((Pointer)this).offsetAddress(i); - } - - public static native @Cast("bool") boolean is_default_value_for_unspecified_arg_enabled(); - public static native void set_default_value_for_unspecified_arg_enabled(@Cast("bool") boolean enabled); - - public static native @Cast("bool") boolean is_default_args_before_out_args_enabled(); - public static native void set_default_args_before_out_args_enabled(@Cast("bool") boolean enabled); - - public static native @Cast("bool") boolean is_emit_promoted_ops_enabled(); - public static native void set_default_emit_promoted_ops_enabled(@Cast("bool") boolean enabled); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BytecodeEmitModeGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BytecodeEmitModeGuard.java deleted file mode 100644 index c42e157275a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BytecodeEmitModeGuard.java +++ /dev/null @@ -1,46 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// RAII guard to switch the way JIT emits the bytecode for inputs. -// default_value_for_unspecified_arg: -// true: instruction of default argument values (like LOADC) is emitted. -// false: instruction of default argument values are not emitted. Instead -// they are fetched from operator schema. -// default_args_before_out_args (to forward compatibile support -// operators allowing out arguments and default arguments): -// true: the number of specified arguments will deserialized to (#all_args - -// #default_args). false: the number of specified arguments will deserialized to -// (#all_args). -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class BytecodeEmitModeGuard extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public BytecodeEmitModeGuard(Pointer p) { super(p); } - - public BytecodeEmitModeGuard( - @Cast("bool") boolean enable_default_value_for_unspecified_arg, - @Cast("bool") boolean enable_default_args_before_out_args, - @Cast("bool") boolean enable_emit_promoted_ops) { super((Pointer)null); allocate(enable_default_value_for_unspecified_arg, enable_default_args_before_out_args, enable_emit_promoted_ops); } - private native void allocate( - @Cast("bool") boolean enable_default_value_for_unspecified_arg, - @Cast("bool") boolean enable_default_args_before_out_args, - @Cast("bool") boolean enable_emit_promoted_ops); - public native @Cast("bool") boolean prev_default_value_for_unspecified_arg_mode(); public native BytecodeEmitModeGuard prev_default_value_for_unspecified_arg_mode(boolean setter); - public native @Cast("bool") boolean prev_default_args_before_out_args(); public native BytecodeEmitModeGuard prev_default_args_before_out_args(boolean setter); - public native @Cast("bool") boolean prev_default_emit_promoted_ops(); public native BytecodeEmitModeGuard prev_default_emit_promoted_ops(boolean setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java b/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java index b1518c81c54..fe39047841d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELU.java deleted file mode 100644 index 1e544c5436d..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELU.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code CELUImpl}. - * See the documentation for {@code CELUImpl} class to learn what methods it - * provides, and examples of how to use {@code CELU} with {@code torch::nn::CELUOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CELU extends CELUImplModuleHolder { - static { Loader.load(); } - - public CELU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public CELU(@SharedPtr @Cast({"", "std::shared_ptr"}) CELUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CELUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CELU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java index e6d8f66aec7..f546816c856 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class CELUImpl extends CELUImplCloneable { } public CELUImpl(@Const @ByRef(nullValue = "torch::nn::CELUOptions{}") CELUOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::CELUOptions{}") CELUOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::CELUOptions{}") CELUOptions options_); public CELUImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java index 60f446c32c0..4be1d5ea58e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class CELUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CELUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CELUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. 
*/ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(CELUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplModuleHolder.java deleted file mode 100644 index 7c6875d1407..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CELUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CELUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public CELUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public CELUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) CELUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CELUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") CELUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. 
*/ - public native @ByRef @Name("operator *") CELUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) CELUImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native CELUImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java index a4372e2d4dc..8dfb3170ab3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java new file mode 100644 index 00000000000..dd8c370d49d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class CPUGeneratorImpl extends GeneratorImpl { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
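+ *
+ * Illustrative sketch only: the CPU generator can be constructed with an
+ * explicit seed and queried directly, e.g.
+ *
+ *   CPUGeneratorImpl gen = new CPUGeneratorImpl(42L);
+ *   long seed = gen.current_seed();   // 42
+ *   int sample = gen.random();        // next 32-bit draw from the mt19937 engine
+ *   gen.set_current_seed(7L);         // reseed in place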
*/ + public CPUGeneratorImpl(Pointer p) { super(p); } + + // Constructors + public CPUGeneratorImpl(@Cast("uint64_t") long seed_in/*=c10::default_rng_seed_val*/) { super((Pointer)null); allocate(seed_in); } + private native void allocate(@Cast("uint64_t") long seed_in/*=c10::default_rng_seed_val*/); + public CPUGeneratorImpl() { super((Pointer)null); allocate(); } + private native void allocate(); + + // CPUGeneratorImpl methods + public native @SharedPtr CPUGeneratorImpl clone(); + public native void set_current_seed(@Cast("uint64_t") long seed); + public native @Cast("uint64_t") long current_seed(); + public native @Cast("uint64_t") long seed(); + public native void set_state(@Const @ByRef TensorImpl new_state); + public native @ByVal TensorImplPtr get_state(); + public static native DeviceType device_type(); + public native @Cast("uint32_t") int random(); + public native @Cast("uint64_t") long random64(); + public native @ByVal FloatOptional next_float_normal_sample(); + public native @ByVal DoubleOptional next_double_normal_sample(); + public native void set_next_float_normal_sample(@ByVal FloatOptional randn); + public native void set_next_double_normal_sample(@ByVal DoubleOptional randn); + public native @ByVal @Cast("at::mt19937*") mt19937_engine engine(); + public native void set_engine(@ByVal @Cast("at::mt19937*") mt19937_engine engine); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLoss.java deleted file mode 100644 index 0689e6269a2..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code CTCLossImpl}. - * See the documentation for {@code CTCLossImpl} class to learn what methods it - * provides, and examples of how to use {@code CTCLoss} with - * {@code torch::nn::CTCLossOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CTCLoss extends CTCLossImplModuleHolder { - static { Loader.load(); } - - public CTCLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public CTCLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) CTCLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CTCLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public CTCLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java index 0af4012f12f..9312f372bf7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,9 +48,9 @@ public class CTCLossImpl extends CTCLossImplCloneable { } public CTCLossImpl(@ByVal(nullValue = "torch::nn::CTCLossOptions{}") CTCLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::CTCLossOptions{}") CTCLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::CTCLossOptions{}") CTCLossOptions options_); public CTCLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java index ba24227fe7a..c5ffee98872 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class CTCLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CTCLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CTCLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(CTCLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplModuleHolder.java deleted file mode 100644 index 71527ae7e54..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CTCLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CTCLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public CTCLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public CTCLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) CTCLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CTCLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") CTCLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") CTCLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) CTCLossImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native CTCLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java index 1d9cc181f50..14b10935978 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,6 +46,6 @@ public class CTCLossOptions extends Pointer { } public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer blank(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer zero_infinity(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java new file mode 100644 index 00000000000..d17028445e6 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java @@ -0,0 +1,29 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// NB: dummy argument to suppress "ISO C++11 requires at least one argument +// for the "..." in a variadic macro" +@Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class CUDAHooksArgs extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public CUDAHooksArgs() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public CUDAHooksArgs(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java new file mode 100644 index 00000000000..9156c2370a2 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java @@ -0,0 +1,120 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +// #endif + +// The CUDAHooksInterface is an omnibus interface for any CUDA functionality +// which we may want to call into from CPU code (and thus must be dynamically +// dispatched, to allow for separate compilation of CUDA code). How do I +// decide if a function should live in this class? There are two tests: +// +// 1. Does the *implementation* of this function require linking against +// CUDA libraries? +// +// 2. Is this function *called* from non-CUDA ATen code? +// +// (2) should filter out many ostensible use-cases, since many times a CUDA +// function provided by ATen is only really ever used by actual CUDA code. +// +// TODO: Consider putting the stub definitions in another class, so that one +// never forgets to implement each virtual function in the real implementation +// in CUDAHooks. This probably doesn't buy us much though. +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class CUDAHooksInterface extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public CUDAHooksInterface() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public CUDAHooksInterface(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
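+ *
+ * Illustrative sketch only; how an instance is obtained is outside this file,
+ * so the variable below is hypothetical:
+ *
+ *   CUDAHooksInterface hooks = ...;   // acquired from native code elsewhere
+ *   if (hooks.hasCUDA() &amp;&amp; hooks.hasCuDNN()) {
+ *       System.out.println(hooks.showConfig());   // human-readable build configuration
+ *   }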
*/ + public CUDAHooksInterface(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public CUDAHooksInterface position(long position) { + return (CUDAHooksInterface)super.position(position); + } + @Override public CUDAHooksInterface getPointer(long i) { + return new CUDAHooksInterface((Pointer)this).offsetAddress(i); + } + + // This should never actually be implemented, but it is used to + // squelch -Werror=non-virtual-dtor + + // Initialize THCState and, transitively, the CUDA state + public native void initCUDA(); + + public native @Const @ByRef Generator getDefaultCUDAGenerator(@Cast("c10::DeviceIndex") byte device_index/*=-1*/); + public native @Const @ByRef Generator getDefaultCUDAGenerator(); + + public native @ByVal Device getDeviceFromPtr(Pointer arg0); + + public native @Cast("bool") boolean isPinnedPtr(Pointer arg0); + + public native @Cast("bool") boolean hasCUDA(); + + public native @Cast("bool") boolean hasCUDART(); + + public native @Cast("bool") boolean hasMAGMA(); + + public native @Cast("bool") boolean hasCuDNN(); + + public native @Cast("bool") boolean hasCuSOLVER(); + + public native @Cast("bool") boolean hasROCM(); + + public native @Cast("const at::cuda::NVRTC*") @ByRef Pointer nvrtc(); + + public native @Cast("bool") boolean hasPrimaryContext(@Cast("int64_t") long device_index); + + public native @Cast("int64_t") long current_device(); + + public native Allocator getPinnedMemoryAllocator(); + + public native Allocator getCUDADeviceAllocator(); + + public native @Cast("bool") boolean compiledWithCuDNN(); + + public native @Cast("bool") boolean compiledWithMIOpen(); + + public native @Cast("bool") boolean supportsDilatedConvolutionWithCuDNN(); + + public native @Cast("bool") boolean supportsDepthwiseConvolutionWithCuDNN(); + + public native @Cast("bool") boolean supportsBFloat16ConvolutionWithCuDNNv8(); + + public native long versionCuDNN(); + + public native long versionCUDART(); + + public native @StdString BytePointer showConfig(); + + public native double batchnormMinEpsilonCuDNN(); + + public native @Cast("int64_t") long cuFFTGetPlanCacheMaxSize(@Cast("int64_t") long arg0); + + public native void cuFFTSetPlanCacheMaxSize(@Cast("int64_t") long arg0, @Cast("int64_t") long arg1); + + public native @Cast("int64_t") long cuFFTGetPlanCacheSize(@Cast("int64_t") long arg0); + + public native void cuFFTClearPlanCache(@Cast("int64_t") long arg0); + + public native int getNumGPUs(); + + public native void deviceSynchronize(@Cast("int64_t") long arg0); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/_object.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUevent_st.java similarity index 69% rename from pytorch/src/gen/java/org/bytedeco/pytorch/_object.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/CUevent_st.java index e3979dc962a..d36882ccba1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/_object.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CUevent_st.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -17,9 +19,9 @@ @Opaque @Properties(inherit = 
org.bytedeco.pytorch.presets.torch.class) -public class _object extends Pointer { +public class CUevent_st extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public _object() { super((Pointer)null); } + public CUevent_st() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public _object(Pointer p) { super(p); } + public CUevent_st(Pointer p) { super(p); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java index e9fa828cc13..7588b5aa72a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,6 +36,6 @@ public class Call extends Pointer { return new Call((Pointer)this).offsetAddress(i); } - public native @StdString BytePointer fn_name(); public native Call fn_name(BytePointer setter); - public native @ByRef SourceRange caller_range(); public native Call caller_range(SourceRange setter); + public native @StdString @NoOffset BytePointer fn_name(); public native Call fn_name(BytePointer setter); + public native @ByRef @NoOffset SourceRange caller_range(); public native Call caller_range(SourceRange setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Capsule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Capsule.java deleted file mode 100644 index 913fe1a7f7a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Capsule.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// Capsule is an internal implementation detail of custom C++ classes. We -// define it as an owning wrapper for -// c10::intrusive_ptr This wrapper is here to serve as -// an abstraction of the type erased custom class object pointer. It also allow -// pybind11 to treat this as a standalone class to register as a separate type -// caster, instead of a custom pointer holder which the pointer holder type -// caster try to "unwrap" it automatically. -@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Capsule extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Capsule(Pointer p) { super(p); } - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java index b48ad3c7ef5..66575eb9008 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java index 3b72c9e9c0d..29a1a9e2b76 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java index 3a807f26e15..fa2f7fa27c8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,10 +29,5 @@ public class CastValue extends BuiltinFunction { public CastValue(@ByVal Type.TypePtr type, @ByVal Symbol method) { super((Pointer)null); allocate(type, method); } private native void allocate(@ByVal Type.TypePtr type, @ByVal Symbol method); - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java index 1250b25e1d9..eb2ccc28080 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import 
org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java index 84ec21fe62d..893a57c3d5e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java index c4bb052062b..59ec42fe86b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java index 77e97000851..8a8ccb0a87a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java index 5219f0cc922..3ef00de253c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java index 79656bbf1d4..a346bc60dd3 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java index 338aac1f1a1..864f0fb09c2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java index b30ceabdf82..3ae0a154083 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java index 4df70ececd6..d6f1ad4a879 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -51,5 +53,4 @@ public class ChunkRandomDataLoaderBase extends Pointer { public native void join(); /** Returns the options with which the DataLoader was configured. 
*/ - public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java index 1732f312054..a9cef9f3f8b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java index b2efa02dedc..145c1c03674 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java index 3844f6429eb..62a9dbb5662 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java index 287dcfe0a75..b61ab0bdc53 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,13 +21,18 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ClassDef extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public ClassDef(Pointer p) { super(p); } - public ClassDef(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public ClassDef(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal ClassDef withName(@StdString BytePointer new_name); public native @ByVal ClassDef withName(@StdString String new_name); public native @ByVal Ident name(); public native @ByVal ExprMaybe superclass(); - public native @ByVal @Cast("torch::jit::Maybe >*") Pointer properties(); - public native @ByVal @Cast("torch::jit::Maybe >*") Pointer assigns(); + public native @ByVal StmtList body(); + public native @ByVal PropertyListMaybe properties(); + public native @ByVal AssignListMaybe assigns(); + + } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java index 369479af30a..3a400637c06 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -178,7 +180,7 @@ public static class Property extends Pointer { public native @ByVal StringArrayRef constantNames(); - public native @ByVal @Cast("at::ArrayRef*") IValueArrayRef constantValues(); + public native @ByVal IValueArrayRef constantValues(); // [Internal Only] Remove constant from the ClassType // caller is responsible to make sure the modification is safe: @@ -249,9 +251,9 @@ public native void checkForwardHookSchema( // where it is know that there are not assignments to the objects slots // that would invalidate the refinement. // These variants are not registered in the global class table. 
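// Illustrative sketch (hypothetical names, not part of the generated file):
// with the explicit @SharedPtr("c10::ClassType") annotation below, the
// ClassType returned by refine() shares ownership with the native
// std::shared_ptr<c10::ClassType>. Assuming a ClassType `cls`, a
// TypeArrayRef `refinedSlots`, and a Type `base` already exist:
//
//   ClassType refined = cls.refine(refinedSlots);    // returns a shared-owned ClassType
//   boolean ok = refined.isSubtypeOfExt(base, null); // why_not stream omitted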
- public native @SharedPtr @ByVal ClassType refine(@ByVal TypeArrayRef refined_slots); + public native @SharedPtr("c10::ClassType") @ByVal ClassType refine(@ByVal TypeArrayRef refined_slots); public native @Cast("bool") boolean isSubtypeOfExt(@Const @ByRef Type rhs, @Cast("std::ostream*") Pointer why_not); - @MemberGetter public static native @Const @ByRef TypeKind Kind(); + @MemberGetter public static native TypeKind Kind(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java index 04d40dcfb77..af943f3f676 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class ClassTypePropertyOptional extends Pointer { public native @Name("operator =") @ByRef ClassTypePropertyOptional put(@ByRef ClassTypePropertyOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef ClassType.Property get(); @ValueSetter public native ClassTypePropertyOptional put(@ByRef ClassType.Property value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java index 3cd1f075f7a..7564d9c3b31 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -24,28 +26,23 @@ public class ClassValue extends SugaredValue { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public ClassValue(Pointer p) { super(p); } - public ClassValue(@SharedPtr @ByVal ClassType type) { super((Pointer)null); allocate(type); } - private native void allocate(@SharedPtr @ByVal ClassType type); + public ClassValue(@SharedPtr("c10::ClassType") @ByVal ClassType type) { super((Pointer)null); allocate(type); } + private native void allocate(@SharedPtr("c10::ClassType") @ByVal ClassType type); // Call the type's constructor, as in: // n = Foo(constructor_arg) - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + - public native @SharedPtr @ByVal SugaredValue attr( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue attr( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @StdString BytePointer field); - public native @SharedPtr @ByVal SugaredValue attr( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue attr( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @StdString String field); public native @StdString BytePointer kind(); - public native @SharedPtr @ByRef ClassType type_(); public native ClassValue type_(ClassType setter); + public native @SharedPtr("c10::ClassType") @ByRef ClassType type_(); public native ClassValue type_(ClassType setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java index 417c2e806bf..e7d386b9b13 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java index 2459d84876d..1350f11bacf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,32 +41,32 @@ public class Code extends Pointer { // is directly created by `GraphExecutor` in which case it's likely to contain // `prim::BailOut`s to control the maximum depth of bailout chains public Code( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name, @Cast("size_t") long remaining_bailout_depth/*=0*/) { super((Pointer)null); allocate(graph, function_name, remaining_bailout_depth); } private native void allocate( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString 
BytePointer function_name, @Cast("size_t") long remaining_bailout_depth/*=0*/); public Code( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name) { super((Pointer)null); allocate(graph, function_name); } private native void allocate( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name); public Code( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name, @Cast("size_t") long remaining_bailout_depth/*=0*/) { super((Pointer)null); allocate(graph, function_name, remaining_bailout_depth); } private native void allocate( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name, @Cast("size_t") long remaining_bailout_depth/*=0*/); public Code( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name) { super((Pointer)null); allocate(graph, function_name); } private native void allocate( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name); public native @Cast("torch::jit::GraphExecutor**") @StdVector PointerPointer grad_executors(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java index 46eacdbba3e..3667c98e779 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java index 94ebc4f5690..4a8bbb3602d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -48,15 +50,15 @@ public enum FunctionType { Method(0), Hook(1), PreHook(2); // constructor that takes a set of functions to compile using the native // resolver public CompilationUnit(@StdString BytePointer source) { super((Pointer)null); allocate(source); } - private native void allocate(@StdString BytePointer source); + @SharedPtr private native void allocate(@StdString BytePointer source); public CompilationUnit(@StdString String source) { super((Pointer)null); allocate(source); } - private native void allocate(@StdString String source); + @SharedPtr private native 
void allocate(@StdString String source); public CompilationUnit() { super((Pointer)null); allocate(); } - private native void allocate(); + @SharedPtr private native void allocate(); public native @ByRef @Name("operator =") CompilationUnit put(@ByRef(true) CompilationUnit arg0); public CompilationUnit(@ByRef(true) CompilationUnit arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByRef(true) CompilationUnit arg0); + @SharedPtr private native void allocate(@ByRef(true) CompilationUnit arg0); @@ -108,31 +110,31 @@ public native void define_hooks( public native @ByVal FunctionVector define( @Const @ByRef QualifiedNameOptional prefix, @StdString BytePointer source, - @Const @SharedPtr @ByRef Resolver resolver, + @Const @SharedPtr("torch::jit::Resolver") @ByRef Resolver resolver, @Const Self self); public native @ByVal FunctionVector define( @Const @ByRef QualifiedNameOptional prefix, @StdString String source, - @Const @SharedPtr @ByRef Resolver resolver, + @Const @SharedPtr("torch::jit::Resolver") @ByRef Resolver resolver, @Const Self self); public native void define_interface( @Const @ByRef QualifiedName qualifiedName, @Const @ByRef ClassDef classDef, - @SharedPtr @ByVal Resolver rcb, + @SharedPtr("torch::jit::Resolver") @ByVal Resolver rcb, @Cast("bool") boolean is_module/*=false*/); public native void define_interface( @Const @ByRef QualifiedName qualifiedName, @Const @ByRef ClassDef classDef, - @SharedPtr @ByVal Resolver rcb); + @SharedPtr("torch::jit::Resolver") @ByVal Resolver rcb); public native Function create_function( @ByVal QualifiedName name, - @SharedPtr @ByVal Graph graph, + @SharedPtr("torch::jit::Graph") @ByVal Graph graph, @Cast("bool") boolean shouldMangle/*=false*/); public native Function create_function( @ByVal QualifiedName name, - @SharedPtr @ByVal Graph graph); + @SharedPtr("torch::jit::Graph") @ByVal Graph graph); /// @@ -159,19 +161,19 @@ public native Function create_function( /** * Register a class as being owned by this compilation unit. */ - public native void register_type(@SharedPtr @ByVal NamedType namedType); + public native void register_type(@SharedPtr NamedType namedType); - public native @SharedPtr @ByVal ClassType get_class(@Const @ByRef QualifiedName name); + public native @SharedPtr("c10::ClassType") @ByVal ClassType get_class(@Const @ByRef QualifiedName name); public native @SharedPtr InterfaceType get_interface(@Const @ByRef QualifiedName name); public native @SharedPtr TupleType get_named_tuple(@Const @ByRef QualifiedName name); - public native @SharedPtr @ByVal NamedType get_type(@Const @ByRef QualifiedName name); + public native @SharedPtr NamedType get_type(@Const @ByRef QualifiedName name); // For testing: clear all Python-defined classes to ensure that unit tests // have isolation. - public native void _clear_python_cu(); + // [Internal Only] Remove method. // Note Used for freezing. 
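For reference, a minimal sketch of how the CompilationUnit bindings above can be exercised from Java. It uses only the constructors and methods visible in this hunk; the TorchScript source and the looked-up name are illustrative, and the QualifiedName(String) constructor is assumed from the c10 API:

    import org.bytedeco.pytorch.*;

    public class CompilationUnitSketch {
        public static void main(String[] args) {
            // The String constructor compiles TorchScript source with the
            // native resolver; with the new @SharedPtr allocator the Java
            // object shares ownership of the underlying
            // std::shared_ptr<torch::jit::CompilationUnit>.
            CompilationUnit cu = new CompilationUnit(
                    "def add_one(x: int) -> int:\n" +
                    "    return x + 1\n");

            // get_type() returns a @SharedPtr NamedType; nothing is
            // registered under this name in this tiny example, so the
            // lookup comes back empty.
            NamedType t = cu.get_type(new QualifiedName("MyClass"));
            System.out.println(t == null || t.isNull() ? "no such type" : t.toString());
        }
    }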
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java index bdfdb2fa6a6..ff4b8bb1df9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnitVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,8 @@ public class CompilationUnitVector extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public CompilationUnit front() { return get(0); } + public CompilationUnit back() { return get(size() - 1); } @Index(function = "at") public native @ByRef CompilationUnit get(@Cast("size_t") long i); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java index 35ab5a72abf..ada53f661d5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,5 +36,6 @@ public class CompileTimeEmptyString extends Pointer { return new CompileTimeEmptyString((Pointer)this).offsetAddress(i); } - public native @Const @ByRef @Name("operator const std::string&") @StdString BytePointer asBytePointer(); + public native @Const @ByRef @Name("operator const std::string&") @StdString @Override String toString(); + public native @Const @Name("operator const char*") @Cast("const char*") BytePointer asBytePointer(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentInfo.java deleted file mode 100644 index 09154017cf6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentInfo.java +++ /dev/null @@ -1,37 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// public view of compressed CompleteArgumentInfo -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CompleteArgumentInfo extends Pointer { - static { 
Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CompleteArgumentInfo(Pointer p) { super(p); } - - public CompleteArgumentInfo(@Const @ByRef CompleteArgumentSpec spec, int i) { super((Pointer)null); allocate(spec, i); } - private native void allocate(@Const @ByRef CompleteArgumentSpec spec, int i); - public native @Cast("bool") boolean isTensor(); - public native ScalarType type(); - public native @Cast("bool") boolean defined(); - public native @Cast("bool") boolean requires_grad(); - public native @ByVal Device device(); - public native int ndimension(); - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes(); - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides(); - public native @ByVal @Name("operator c10::TypePtr") Type.TypePtr asTypePtr(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentInfoPOD.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentInfoPOD.java deleted file mode 100644 index 8fb9c1c306b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentInfoPOD.java +++ /dev/null @@ -1,58 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// CompleteArgumentSpec represents one particular specialization. -// It is designed so that it can be created, hashed, and compared quickly -// since it is used along the hot-path of the JIT to check if the code -// we have created is valid for the given inputs. - -// COmpleteArgumentInfoPOD is only used internally in CompleteArgumentSpec -// API users should use ArgumentInfo -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CompleteArgumentInfoPOD extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public CompleteArgumentInfoPOD() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public CompleteArgumentInfoPOD(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public CompleteArgumentInfoPOD(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public CompleteArgumentInfoPOD position(long position) { - return (CompleteArgumentInfoPOD)super.position(position); - } - @Override public CompleteArgumentInfoPOD getPointer(long i) { - return new CompleteArgumentInfoPOD((Pointer)this).offsetAddress(i); - } - - // total size is 64-bit - public native @Cast("unsigned") @NoOffset int is_tensor(); public native CompleteArgumentInfoPOD is_tensor(int setter); // all other fields are invalid if this is false - public native @Cast("unsigned") @NoOffset int type(); public native CompleteArgumentInfoPOD type(int setter); // scalar type - public native @Cast("unsigned") @NoOffset int defined(); public native CompleteArgumentInfoPOD defined(int setter); - public native @Cast("unsigned") @NoOffset int requires_grad(); public native CompleteArgumentInfoPOD requires_grad(int setter); - public native @NoOffset int device(); public native CompleteArgumentInfoPOD device(int setter); - public native @Cast("unsigned") @NoOffset int dev_type(); public native CompleteArgumentInfoPOD dev_type(int setter); - public native @Cast("unsigned") @NoOffset int total_dims(); public native CompleteArgumentInfoPOD total_dims(int setter); // all TensorInfoPODs are in CompleteArgumentSpec's - // tensor_info() array. total_dims is the total number of - // dimensions seen so far in all previous members of - // tensor_info(), including this tensor 2*total_dims - // becomes the offset into the sizes_strides list for the - // _next_ tensor in the tensor_info array for tensor 0, - // the offset is always 0 -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentSpec.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentSpec.java deleted file mode 100644 index dd51d7671f0..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompleteArgumentSpec.java +++ /dev/null @@ -1,36 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CompleteArgumentSpec extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public CompleteArgumentSpec(Pointer p) { super(p); } - - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) - public CompleteArgumentSpec(@Cast("bool") boolean with_grad, @ByVal @Cast("at::ArrayRef*") IValueArrayRef inputs) { super((Pointer)null); allocate(with_grad, inputs); } - private native void allocate(@Cast("bool") boolean with_grad, @ByVal @Cast("at::ArrayRef*") IValueArrayRef inputs); - - // equality is fast: check ninputs, and then check the raw array data, - // there are no size/stride indirections - public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef CompleteArgumentSpec spec); - public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef CompleteArgumentSpec spec); - public native @ByVal CompleteArgumentInfo at(@Cast("size_t") long i); - public native @Cast("size_t") long size(); - public native @Cast("size_t") @Name("hashCode") long _hashCode(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexHolder.java deleted file mode 100644 index a58dc087eb4..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexHolder.java +++ /dev/null @@ -1,39 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -// We need a ComplexHolder because currently the payloads in the Union -// only take 64 bits. Since ComplexDouble takes up 128 bits, and is too big -// to fit in the IValue directly, we indirect complex numbers through an intrusive -// pointer to ComplexHolder (which contains a c10::complex). -@Namespace("c10::ivalue") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ComplexHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ComplexHolder(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public ComplexHolder(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ComplexHolder position(long position) { - return (ComplexHolder)super.position(position); - } - @Override public ComplexHolder getPointer(long i) { - return new ComplexHolder((Pointer)this).offsetAddress(i); - } - - public ComplexHolder() { super((Pointer)null); allocate(); } - private native void allocate(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java index 1398393483d..3292927f901 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,5 +29,5 @@ public class ComplexType extends NumberType { public native @Cast("bool") boolean isSubtypeOfExt(@Const @ByRef Type rhs, @Cast("std::ostream*") Pointer why_not); @MemberGetter public static native TypeKind Kind(); // global singleton - + public static native @ByVal @Name("get") ComplexTypePtr getComplexTypePtr(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java index b00507ad64f..a92bf7f046f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java index 92afde8af1e..0ef8c5dbe94 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -24,13 +26,13 @@ public class Compound extends Tree { public Compound(int kind, @ByVal SourceRange range) { super((Pointer)null); allocate(kind, range); } private native void allocate(int kind, @ByVal SourceRange range); - public Compound(int kind, @Const @ByRef SourceRange range_, @Cast("torch::jit::TreeList*") @ByRef(true) Pointer trees_) { super((Pointer)null); allocate(kind, range_, trees_); } - private native void 
allocate(int kind, @Const @ByRef SourceRange range_, @Cast("torch::jit::TreeList*") @ByRef(true) Pointer trees_); - public native @Cast("const torch::jit::TreeList*") @ByRef Pointer trees(); - public static native @ByVal @Cast("torch::jit::TreeRef*") Pointer create( + public Compound(int kind, @Const @ByRef SourceRange range_, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector trees_) { super((Pointer)null); allocate(kind, range_, trees_); } + private native void allocate(int kind, @Const @ByRef SourceRange range_, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector trees_); + public native @Cast("const torch::jit::TreeList*") @ByRef SymDimVector trees(); + public static native @ByVal TreeRef create( int kind, @Const @ByRef SourceRange range_, - @Cast("torch::jit::TreeList*") @ByRef(true) Pointer trees_); + @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector trees_); public native @Cast("bool") boolean isAtom(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java index 9f39f88e00d..d1135a3a9b0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,14 +21,17 @@ @Name("torch::jit::Const") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ConstExpr extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public ConstExpr(Pointer p) { super(p); } - public ConstExpr(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public ConstExpr(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @Cast("bool") boolean isFloatingPoint(); public native @Cast("bool") boolean isIntegral(); public native @Cast("bool") boolean isComplex(); public native @Cast("int64_t") long asIntegral(); public native double asFloatingPoint(); + public native @ByVal DoubleComplex asComplex(); public native @StdString BytePointer text(); public static native @ByVal ConstExpr create(@Const @ByRef SourceRange range, @StdString BytePointer value); public static native @ByVal ConstExpr create(@Const @ByRef SourceRange range, @StdString String value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1d.java deleted file mode 100644 index 30c1beb518d..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ConstantPad1dImpl}. - * See the documentation for {@code ConstantPad1dImpl} class to learn what methods it - * provides, and examples of how to use {@code ConstantPad1d} with - * {@code torch::nn::ConstantPad1dOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConstantPad1d extends ConstantPad1dImplModuleHolder { - static { Loader.load(); } - - public ConstantPad1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ConstantPad1d(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ConstantPad1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java index 5a6005f827c..def707bac9b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class ConstantPad1dImpl extends ConstantPad1dImplBase { public ConstantPad1dImpl(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value); public ConstantPad1dImpl(@Const @ByRef ConstantPad1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ConstantPad1dOptions options_); + private native void allocate(@Const @ByRef ConstantPad1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad1dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java index 2cc9a13d7d4..bf512337f1b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,9 +28,9 @@ public class ConstantPad1dImplBase extends ConstantPad1dImplCloneable { public ConstantPad1dImplBase(Pointer p) { super(p); } public ConstantPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding, double value); public ConstantPad1dImplBase(@Const @ByRef ConstantPad1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ConstantPad1dOptions options_); + private native void allocate(@Const @ByRef ConstantPad1dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java index 58ff3eb103c..77c533320e3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ConstantPad1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ConstantPad1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplModuleHolder.java deleted file mode 100644 index 426e9038983..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConstantPad1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ConstantPad1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. 
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ConstantPad1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ConstantPad1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ConstantPad1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ConstantPad1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ConstantPad1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java index 7635e0b0493..fb76165c453 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2d.java deleted file mode 100644 index 73b367357c1..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ConstantPad2dImpl}. - * See the documentation for {@code ConstantPad2dImpl} class to learn what methods it - * provides, and examples of how to use {@code ConstantPad2d} with - * {@code torch::nn::ConstantPad2dOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConstantPad2d extends ConstantPad2dImplModuleHolder { - static { Loader.load(); } - - public ConstantPad2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ConstantPad2d(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ConstantPad2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java index a3565c1b4de..47c6f1939c4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class ConstantPad2dImpl extends ConstantPad2dImplBase { public ConstantPad2dImpl(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value); public ConstantPad2dImpl(@Const @ByRef ConstantPad2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ConstantPad2dOptions options_); + private native void allocate(@Const @ByRef ConstantPad2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad2dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java index c04155ac134..3e01e544152 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class ConstantPad2dImplBase extends ConstantPad2dImplCloneable { public ConstantPad2dImplBase(Pointer p) { super(p); } public ConstantPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding, double value); public ConstantPad2dImplBase(@Const @ByRef ConstantPad2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ConstantPad2dOptions options_); + private native void allocate(@Const @ByRef ConstantPad2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java index 8994295a9fa..a1c7bdb5453 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ConstantPad2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ConstantPad2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplModuleHolder.java deleted file mode 100644 index cabc68a4498..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConstantPad2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ConstantPad2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. 
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ConstantPad2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ConstantPad2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ConstantPad2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ConstantPad2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ConstantPad2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java index 1eccb33452b..ea01c8825fe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3d.java deleted file mode 100644 index 849ef5a76bd..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ConstantPad3dImpl}. - * See the documentation for {@code ConstantPad3dImpl} class to learn what methods it - * provides, and examples of how to use {@code ConstantPad3d} with - * {@code torch::nn::ConstantPad3dOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConstantPad3d extends ConstantPad3dImplModuleHolder { - static { Loader.load(); } - - public ConstantPad3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ConstantPad3d(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ConstantPad3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java index fc678da3f46..65b166530e7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class ConstantPad3dImpl extends ConstantPad3dImplBase { public ConstantPad3dImpl(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value); public ConstantPad3dImpl(@Const @ByRef ConstantPad3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ConstantPad3dOptions options_); + private native void allocate(@Const @ByRef ConstantPad3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad3dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java index 8a396fc6fbc..4dccffffbec 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class ConstantPad3dImplBase extends ConstantPad3dImplCloneable { public ConstantPad3dImplBase(Pointer p) { super(p); } public ConstantPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value) { super((Pointer)null); allocate(padding, value); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding, double value); public ConstantPad3dImplBase(@Const @ByRef ConstantPad3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ConstantPad3dOptions options_); + private native void allocate(@Const @ByRef ConstantPad3dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java index 858127b99d0..bdf421014ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ConstantPad3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConstantPad3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConstantPad3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ConstantPad3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplModuleHolder.java deleted file mode 100644 index a7056fdf1c3..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConstantPad3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ConstantPad3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. 
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ConstantPad3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ConstantPad3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ConstantPad3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ConstantPad3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ConstantPad3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java index e5dae62ebd9..1c575867687 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java index 3055e3882b1..20383cafdc0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,10 +17,25 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("c10::ivalue") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) + +// string +@Namespace("c10::ivalue") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ConstantString extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public ConstantString() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ConstantString(Pointer p) { super(p); } + static { Loader.load(); } + + public ConstantString(@StdString BytePointer str) { super((Pointer)null); allocate(str); } + private native void allocate(@StdString BytePointer str); + public ConstantString(@StdString String str) { super((Pointer)null); allocate(str); } + private native void allocate(@StdString String str); + public ConstantString(@ByVal @Cast("c10::string_view*") Pointer str) { super((Pointer)null); allocate(str); } + private native void allocate(@ByVal @Cast("c10::string_view*") Pointer str); + public static native @ByVal ConstantStringPtr create(@StdString BytePointer str_); + public static native @ByVal ConstantStringPtr create(@StdString String str_); + public static native @ByVal ConstantStringPtr create(@ByVal @Cast("c10::string_view*") Pointer str_); + + public native @StdString BytePointer string(); + public native @ByVal @Cast("c10::string_view*") Pointer string_view(); + + public native @Const @ByRef @Name("operator const std::string&") @StdString @Override String toString(); + } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java new file mode 100644 index 00000000000..ce088869776 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ConstantStringPtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ConstantStringPtr(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ConstantStringPtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public ConstantStringPtr position(long position) { + return (ConstantStringPtr)super.position(position); + } + @Override public ConstantStringPtr getPointer(long i) { + return new ConstantStringPtr((Pointer)this).offsetAddress(i); + } + + + public ConstantStringPtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public ConstantStringPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. 
+    // We use the tagged dispatch mechanism to explicitly mark this constructor
+    // to not increase the refcount
+    public ConstantStringPtr(ConstantString target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); }
+    @NoException(true) private native void allocate(ConstantString target, @ByVal DontIncreaseRefcount arg1);
+
+
+
+    public ConstantStringPtr(@ByRef(true) ConstantStringPtr rhs) { super((Pointer)null); allocate(rhs); }
+    @NoException(true) private native void allocate(@ByRef(true) ConstantStringPtr rhs);
+
+    public native @ByRef @Name("operator =") @NoException(true) ConstantStringPtr put(@ByRef(true) ConstantStringPtr rhs);
+
+    public native @NoException(true) ConstantString get();
+
+    public native @ByRef @Name("operator *") @NoException(true) ConstantString multiply();
+
+    public native @Name("operator ->") @NoException(true) ConstantString access();
+
+    public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean();
+
+    public native @NoException(true) void reset();
+
+    public native @NoException(true) void swap(@ByRef ConstantStringPtr rhs);
+
+    // We do a lot of null-pointer checks in our code, good to have this be cheap.
+    public native @Cast("bool") @NoException(true) boolean defined();
+
+    public native @Cast("size_t") @NoException(true) long use_count();
+
+    public native @Cast("size_t") @NoException(true) long weak_use_count();
+
+    public native @Cast("bool") @NoException(true) boolean unique();
+
+    /**
+     * Returns an owning (!) pointer to the underlying object and makes the
+     * intrusive_ptr instance invalid. That means the refcount is not decreased.
+     * You *must* put the returned pointer back into an intrusive_ptr using
+     * intrusive_ptr::reclaim(ptr) to properly destruct it.
+     * This is helpful for C APIs.
+     */
+    public native @NoException(true) ConstantString release();
+
+    /**
+     * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes
+     * over ownership. That means the refcount is not increased.
+     * This is the counter-part to intrusive_ptr::release() and the pointer
+     * passed in *must* have been created using intrusive_ptr::release().
+     */
+    public static native @ByVal ConstantStringPtr reclaim(ConstantString owning_ptr);
+
+    /**
+     * Takes an owning pointer to TTarget* and creates an intrusive_ptr
+     * representing a new reference, i.e. the raw pointer retains
+     * ownership.
+     */
+    public static native @ByVal ConstantStringPtr reclaim_copy(ConstantString owning_ptr);
+
+    /**
+     * Allocate a heap object with args and wrap it inside an intrusive_ptr and
+     * incref. This is a helper function to let make_intrusive() access private
+     * intrusive_ptr constructors.
+     */
+
+    /**
+     * Turn a new instance of TTarget (e.g., literally allocated
+     * using new TTarget(...)) into an intrusive_ptr. If possible,
+     * use intrusive_ptr::make instead which statically guarantees
+     * that the allocation was done properly.
+     *
+     * At the moment, the only reason this method exists is because
+     * pybind11 holder types expect to be able to allocate in
+     * this way (because pybind11 handles the new allocation itself).
+     */
+    public static native @ByVal ConstantStringPtr unsafe_steal_from_new(ConstantString raw_ptr);
+
+    /**
+     * Turn an instance of TTarget that should not be reference counted
+     * (e.g., allocated into an arena with placement new) into an
+     * intrusive_ptr. This is gratuitously unsafe and should only be
+     * used if you can guarantee that the pointer will not escape and be
+     * refcounted as normal.
+     *
+     * {@code expected_decrefs} is a debugging parameter: it indicates the
+     * number of strong owners the intrusive_ptr_target in question is
+     * expected to get. In most use cases, this will likely be 1.
+     *
+     * The reason this method exists is for manually sharing
+     * StorageImpls across Tensors in the static runtime. It needs
+     * access to private intrusive_ptr members so that the refcounts can
+     * be initialized to custom values.
+     */
+    public static native @ByVal ConstantStringPtr unsafe_adapt_non_heap_allocated(
+          ConstantString raw_ptr,
+          @Cast("size_t") long expected_decrefs);
+
+    /**
+     * Turn a **non-owning raw pointer** into an intrusive_ptr. It is
+     * the moral equivalent of enable_shared_from_this on a shared pointer.
+     *
+     * This method is only valid for objects that are already live. If
+     * you are looking for the moral equivalent of unique_ptr<T>(T*)
+     * constructor, see unsafe_steal_from_new.
+     *
+     * TODO: https://github.com/pytorch/pytorch/issues/56482
+     */
+    public static native @ByVal ConstantStringPtr unsafe_reclaim_from_nonowning(ConstantString raw_ptr);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
index 760258bd5f5..dc114eb7b9d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java
index e38b860d778..85b772f9cd3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -19,8 +21,10 @@
 @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
 public class Continue extends Stmt {
     static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
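
A short usage sketch for the new ConstantString bindings above (illustrative only, not part of the generated sources; every call it makes appears in the API shown in this hunk):

    import org.bytedeco.pytorch.ConstantString;
    import org.bytedeco.pytorch.ConstantStringPtr;

    public class ConstantStringExample {
        public static void main(String[] args) {
            ConstantStringPtr p = ConstantString.create("hello");  // returns an owning intrusive_ptr
            System.out.println(p.get().toString());  // "hello", via operator const std::string&
            System.out.println(p.use_count());       // 1: a single strong owner
            p.reset();                               // drops the reference on the native side
        }
    }
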
*/ + public Continue(Pointer p) { super(p); } - public Continue(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Continue(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public static native @ByVal Continue create(@Const @ByRef SourceRange range); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1d.java deleted file mode 100644 index 2a3d6e358d6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code Conv1dImpl}. - * See the documentation for {@code Conv1dImpl} class to learn what methods it - * provides, and examples of how to use {@code Conv1d} with - * {@code torch::nn::Conv1dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Conv1d extends Conv1dImplModuleHolder { - static { Loader.load(); } - - public Conv1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Conv1d(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Conv1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java index 3779e96a24a..8dcee34fdbc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,7 +39,7 @@ public class Conv1dFuncOptions extends Pointer { public native @ByRef @NoException(true) Tensor bias(); public native @Cast("torch::ExpandingArray<1>*") @ByRef @NoException(true) LongPointer stride(); - public native @ByRef @NoException(true) conv_padding_t1 padding(); + public native @ByRef @NoException(true) Conv1dPadding padding(); public native @Cast("torch::ExpandingArray<1>*") @ByRef @NoException(true) LongPointer dilation(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java index eb82b9229bd..2aa9590600f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,11 +41,11 @@ public Conv1dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public Conv1dImpl(@ByVal Conv1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal Conv1dOptions options_); + @SharedPtr private native void allocate(@ByVal Conv1dOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java index f3320d87927..60060d92854 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -24,7 +26,7 @@ public class Conv1dImplBase extends Conv1dImplCloneable { public Conv1dImplBase(Pointer p) { super(p); } public Conv1dImplBase(@ByVal DetailConv1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal DetailConv1dOptions options_); + private native void allocate(@ByVal DetailConv1dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java index 4a3d5098607..f6917c76891 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class Conv1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(Conv1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
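
The clone() methods above now return by value with @SharedPtr("torch::nn::Module"). A minimal sketch of the deep-copy call (illustrative only; the Device and DeviceOptional constructors in the commented line are assumptions following the usual JavaCPP patterns):

    import org.bytedeco.javacpp.LongPointer;
    import org.bytedeco.pytorch.Conv1dImpl;
    import org.bytedeco.pytorch.Module;

    public class CloneExample {
        public static void main(String[] args) {
            Conv1dImpl conv = new Conv1dImpl(16, 32, new LongPointer(new long[]{3}));
            Module copy = conv.clone();  // recursive deep copy: independent parameter storage
            // Device-targeted overload, assuming the usual optional/device constructors:
            // Module onCuda = conv.clone(new DeviceOptional(new Device("cuda:0")));
        }
    }
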
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplModuleHolder.java deleted file mode 100644 index a7703fa6c58..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Conv1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Conv1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public Conv1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public Conv1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") Conv1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") Conv1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Conv1dImpl ptr(); - - /** Returns a pointer to the underlying module. 
 */
-    public native Conv1dImpl get();
-
-    /** Returns a const pointer to the underlying module. */
-
-    /** Calls the {@code forward()} method of the contained module. */
-
-    /** Forwards to the subscript operator of the contained module.
-     *  NOTE: std::forward is qualified to prevent VS2017 emitting
-     *  error C2872: 'std': ambiguous symbol */
-
-    /** Returns true if the {@code ModuleHolder} does not contain a module. */
-    public native @Cast("bool") @NoException(true) boolean is_empty();
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java
index a8b83264a7a..d3231ae8e11 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -38,9 +40,9 @@ private native void allocate(
   public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer out_channels();
   public native @Cast("torch::ExpandingArray<1>*") @ByRef @NoException(true) LongPointer kernel_size();
   public native @Cast("torch::ExpandingArray<1>*") @ByRef @NoException(true) LongPointer stride();
-  public native @ByRef @NoException(true) conv_padding_t1 padding();
+  public native @ByRef @NoException(true) Conv1dPadding padding();
   public native @Cast("torch::ExpandingArray<1>*") @ByRef @NoException(true) LongPointer dilation();
   public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups();
   public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias();
-  public native @ByRef @NoException(true) conv_padding_mode_t padding_mode();
+  public native @ByRef @NoException(true) ConvPaddingMode padding_mode();
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_t1.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java
similarity index 62%
rename from pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_t1.java
rename to pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java
index c7936b0563b..65438c9836c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_t1.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -16,18 +18,18 @@
 import static org.bytedeco.pytorch.global.torch.*;
 
 @NoOffset @Name("c10::variant<torch::ExpandingArray<1>,torch::enumtype::kValid,torch::enumtype::kSame>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class conv_padding_t1 extends Pointer {
+public class Conv1dPadding extends Pointer {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ - public conv_padding_t1(Pointer p) { super(p); } + public Conv1dPadding(Pointer p) { super(p); } public @Cast("torch::ExpandingArray<1>*") @ByRef LongPointer get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @Cast("torch::ExpandingArray<1>*") @ByRef LongPointer get0(@ByRef conv_padding_t1 container); - @ValueSetter public native conv_padding_t1 put(@Cast("torch::ExpandingArray<1>*") @ByRef LongPointer value); + @Namespace @Name("c10::get<0>") public static native @Cast("torch::ExpandingArray<1>*") @ByRef LongPointer get0(@ByRef Conv1dPadding container); + @ValueSetter public native Conv1dPadding put(@Cast("torch::ExpandingArray<1>*") @ByRef LongPointer value); public @ByRef kValid get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kValid get1(@ByRef conv_padding_t1 container); - @ValueSetter public native conv_padding_t1 put(@ByRef kValid value); + @Namespace @Name("c10::get<1>") public static native @ByRef kValid get1(@ByRef Conv1dPadding container); + @ValueSetter public native Conv1dPadding put(@ByRef kValid value); public @ByRef kSame get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kSame get2(@ByRef conv_padding_t1 container); - @ValueSetter public native conv_padding_t1 put(@ByRef kSame value); + @Namespace @Name("c10::get<2>") public static native @ByRef kSame get2(@ByRef Conv1dPadding container); + @ValueSetter public native Conv1dPadding put(@ByRef kSame value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2d.java deleted file mode 100644 index fecad4772cc..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code Conv2dImpl}. - * See the documentation for {@code Conv2dImpl} class to learn what methods it - * provides, and examples of how to use {@code Conv2d} with - * {@code torch::nn::Conv2dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Conv2d extends Conv2dImplModuleHolder { - static { Loader.load(); } - - public Conv2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Conv2d(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
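
The renamed padding variant just shown can hold either an explicit width or one of the enum tags, via the put() overloads in its generated body. A sketch (illustrative only; the Conv1dOptions three-argument constructor and the default-constructible kSame tag are assumptions):

    import org.bytedeco.javacpp.LongPointer;
    import org.bytedeco.pytorch.Conv1dOptions;

    public class PaddingVariantExample {
        public static void main(String[] args) {
            // in_channels=16, out_channels=32, kernel_size=3 (assumed constructor)
            Conv1dOptions opts = new Conv1dOptions(16, 32, new LongPointer(new long[]{3}));
            // The variant holds an explicit ExpandingArray<1> padding width...
            opts.padding().put(new LongPointer(new long[]{1}));
            // ...or one of the enum tags, via the kValid/kSame put() overloads:
            // opts.padding().put(new kSame());  // assumes kSame is default-constructible
        }
    }
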
*/ - public Conv2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java index 84c83ad3979..33d8cdf4416 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,7 +37,7 @@ public class Conv2dFuncOptions extends Pointer { public native @ByRef @NoException(true) Tensor bias(); public native @Cast("torch::ExpandingArray<2>*") @ByRef @NoException(true) LongPointer stride(); - public native @ByRef @NoException(true) conv_padding_t2 padding(); + public native @ByRef @NoException(true) Conv2dPadding padding(); public native @Cast("torch::ExpandingArray<2>*") @ByRef @NoException(true) LongPointer dilation(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java index 5b5106cd801..fa90e74e409 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,11 +41,11 @@ public Conv2dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public Conv2dImpl(@ByVal Conv2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal Conv2dOptions options_); + @SharedPtr private native void allocate(@ByVal Conv2dOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java index be82f30495b..20281a5c589 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,7 +24,7 @@ public class Conv2dImplBase extends Conv2dImplCloneable { public Conv2dImplBase(Pointer p) { super(p); } public Conv2dImplBase(@ByVal DetailConv2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal DetailConv2dOptions options_); + private native void allocate(@ByVal DetailConv2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java index 2a22539aea7..cd3f0d8e838 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class Conv2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(Conv2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplModuleHolder.java deleted file mode 100644 index bef5973105b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Conv2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Conv2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public Conv2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public Conv2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") Conv2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") Conv2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Conv2dImpl ptr(); - - /** Returns a pointer to the underlying module. 
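With the ModuleHolder wrappers (Conv2d and its Conv2dImplModuleHolder base, deleted above) removed, the *Impl classes become the way to instantiate these modules from Java. A minimal usage sketch under that assumption; the randn(long...) factory overload and the harness around it are illustrative, not taken from this diff:

    import org.bytedeco.javacpp.LongPointer;
    import org.bytedeco.pytorch.Conv2dImpl;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.randn;

    public class Conv2dDirectUse {
        public static void main(String[] args) {
            // 3 input channels, 16 output channels, 3x3 kernel (ExpandingArray<2> maps to LongPointer)
            Conv2dImpl conv = new Conv2dImpl(3, 16, new LongPointer(3, 3));
            Tensor input = randn(1, 3, 32, 32);   // assumed long... overload of the randn factory
            Tensor output = conv.forward(input);  // forward() is declared on Conv2dImpl itself
            System.out.println(output.size(1));   // 16: the configured number of output channels
        }
    }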
*/ - public native Conv2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java index 0ad32ebdb40..d38e10c4604 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,9 +36,9 @@ private native void allocate( public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer out_channels(); public native @Cast("torch::ExpandingArray<2>*") @ByRef @NoException(true) LongPointer kernel_size(); public native @Cast("torch::ExpandingArray<2>*") @ByRef @NoException(true) LongPointer stride(); - public native @ByRef @NoException(true) conv_padding_t2 padding(); + public native @ByRef @NoException(true) Conv2dPadding padding(); public native @Cast("torch::ExpandingArray<2>*") @ByRef @NoException(true) LongPointer dilation(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias(); - public native @ByRef @NoException(true) conv_padding_mode_t padding_mode(); + public native @ByRef @NoException(true) ConvPaddingMode padding_mode(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_t2.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java similarity index 62% rename from pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_t2.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java index 9922d70431f..5ff606cc516 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_t2.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,18 +18,18 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class conv_padding_t2 extends Pointer { +public class Conv2dPadding extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
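Alongside the holder removal, the C++-style helper names are Javafied: conv_padding_t2 becomes Conv2dPadding, and Conv2dOptions.padding() now returns the renamed variant. A short sketch of the renamed accessors, continuing the example above (the three-argument options constructor mirrors the Impl constructor, and kSame/kValid are the generated torch::enumtype wrappers):

    Conv2dOptions options = new Conv2dOptions(3, 16, new LongPointer(3, 3));
    options.padding().put(new LongPointer(1, 1)); // variant alternative 0: an explicit ExpandingArray<2>
    LongPointer pad = options.padding().get0();   // c10::get<0>; raises if another alternative is active
    options.padding().put(new kSame());           // variant alternative 2: torch::enumtype::kSame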
*/ - public conv_padding_t2(Pointer p) { super(p); } + public Conv2dPadding(Pointer p) { super(p); } public @Cast("torch::ExpandingArray<2>*") @ByRef LongPointer get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @Cast("torch::ExpandingArray<2>*") @ByRef LongPointer get0(@ByRef conv_padding_t2 container); - @ValueSetter public native conv_padding_t2 put(@Cast("torch::ExpandingArray<2>*") @ByRef LongPointer value); + @Namespace @Name("c10::get<0>") public static native @Cast("torch::ExpandingArray<2>*") @ByRef LongPointer get0(@ByRef Conv2dPadding container); + @ValueSetter public native Conv2dPadding put(@Cast("torch::ExpandingArray<2>*") @ByRef LongPointer value); public @ByRef kValid get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kValid get1(@ByRef conv_padding_t2 container); - @ValueSetter public native conv_padding_t2 put(@ByRef kValid value); + @Namespace @Name("c10::get<1>") public static native @ByRef kValid get1(@ByRef Conv2dPadding container); + @ValueSetter public native Conv2dPadding put(@ByRef kValid value); public @ByRef kSame get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kSame get2(@ByRef conv_padding_t2 container); - @ValueSetter public native conv_padding_t2 put(@ByRef kSame value); + @Namespace @Name("c10::get<2>") public static native @ByRef kSame get2(@ByRef Conv2dPadding container); + @ValueSetter public native Conv2dPadding put(@ByRef kSame value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3d.java deleted file mode 100644 index 11278fee82c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code Conv3dImpl}. - * See the documentation for {@code Conv3dImpl} class to learn what methods it - * provides, and examples of how to use {@code Conv3d} with - * {@code torch::nn::Conv3dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Conv3d extends Conv3dImplModuleHolder { - static { Loader.load(); } - - public Conv3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Conv3d(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Conv3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java index 21f3e161971..cf57c7f17ac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,7 +37,7 @@ public class Conv3dFuncOptions extends Pointer { public native @ByRef @NoException(true) Tensor bias(); public native @Cast("torch::ExpandingArray<3>*") @ByRef @NoException(true) LongPointer stride(); - public native @ByRef @NoException(true) conv_padding_t3 padding(); + public native @ByRef @NoException(true) Conv3dPadding padding(); public native @Cast("torch::ExpandingArray<3>*") @ByRef @NoException(true) LongPointer dilation(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java index 3cb8da76f6c..0bfafc5d10d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,11 +41,11 @@ public Conv3dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public Conv3dImpl(@ByVal Conv3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal Conv3dOptions options_); + @SharedPtr private native void allocate(@ByVal Conv3dOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java index e547c8ff070..f463912dadf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,7 +24,7 @@ public class Conv3dImplBase extends Conv3dImplCloneable { public Conv3dImplBase(Pointer p) { super(p); } public Conv3dImplBase(@ByVal DetailConv3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal DetailConv3dOptions options_); + private native void allocate(@ByVal DetailConv3dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java index 6249dbba06b..d509cc82c7e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class Conv3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Conv3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Conv3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(Conv3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
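asModule() changes from a raw static_cast to a static_pointer_cast that takes and returns @SharedPtr, so the torch::nn::Module view now shares ownership with the wrapped object instead of aliasing a raw pointer. A sketch, continuing the example above:

    Conv3dImpl conv3 = new Conv3dImpl(1, 8, new LongPointer(3, 3, 3)); // 3x3x3 kernel
    Module base = conv3.asModule();          // shared_ptr upcast keeps the implementation alive
    System.out.println(base.is_training());  // the base-class torch::nn::Module API is available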
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplModuleHolder.java deleted file mode 100644 index 53152cb55df..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Conv3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Conv3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public Conv3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public Conv3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Conv3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") Conv3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") Conv3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Conv3dImpl ptr(); - - /** Returns a pointer to the underlying module. 
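clone() likewise moves from returning @SharedPtr Module to @ByVal with an explicit @SharedPtr("torch::nn::Module") adapter; callers still receive a Module, and the declared nullValue lets Java pass null for the optional device. For example:

    Module copy = conv3.clone();      // recursive deep copy: parameters and buffers are duplicated
    Module copy2 = conv3.clone(null); // null selects the c10::nullopt default for DeviceOptional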
*/ - public native Conv3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java index 8797b91502f..ce79bad870c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,9 +36,9 @@ private native void allocate( public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer out_channels(); public native @Cast("torch::ExpandingArray<3>*") @ByRef @NoException(true) LongPointer kernel_size(); public native @Cast("torch::ExpandingArray<3>*") @ByRef @NoException(true) LongPointer stride(); - public native @ByRef @NoException(true) conv_padding_t3 padding(); + public native @ByRef @NoException(true) Conv3dPadding padding(); public native @Cast("torch::ExpandingArray<3>*") @ByRef @NoException(true) LongPointer dilation(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias(); - public native @ByRef @NoException(true) conv_padding_mode_t padding_mode(); + public native @ByRef @NoException(true) ConvPaddingMode padding_mode(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_t3.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java similarity index 62% rename from pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_t3.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java index e214c8721d4..5aa251b9657 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_t3.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,18 +18,18 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class conv_padding_t3 extends Pointer { +public class Conv3dPadding extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public conv_padding_t3(Pointer p) { super(p); } + public Conv3dPadding(Pointer p) { super(p); } public @Cast("torch::ExpandingArray<3>*") @ByRef LongPointer get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @Cast("torch::ExpandingArray<3>*") @ByRef LongPointer get0(@ByRef conv_padding_t3 container); - @ValueSetter public native conv_padding_t3 put(@Cast("torch::ExpandingArray<3>*") @ByRef LongPointer value); + @Namespace @Name("c10::get<0>") public static native @Cast("torch::ExpandingArray<3>*") @ByRef LongPointer get0(@ByRef Conv3dPadding container); + @ValueSetter public native Conv3dPadding put(@Cast("torch::ExpandingArray<3>*") @ByRef LongPointer value); public @ByRef kValid get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kValid get1(@ByRef conv_padding_t3 container); - @ValueSetter public native conv_padding_t3 put(@ByRef kValid value); + @Namespace @Name("c10::get<1>") public static native @ByRef kValid get1(@ByRef Conv3dPadding container); + @ValueSetter public native Conv3dPadding put(@ByRef kValid value); public @ByRef kSame get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kSame get2(@ByRef conv_padding_t3 container); - @ValueSetter public native conv_padding_t3 put(@ByRef kSame value); + @Namespace @Name("c10::get<2>") public static native @ByRef kSame get2(@ByRef Conv3dPadding container); + @ValueSetter public native Conv3dPadding put(@ByRef kSame value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_mode_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java similarity index 52% rename from pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_mode_t.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java index 23e2409b829..2d65e7d63e4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/conv_padding_mode_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,29 +18,29 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class conv_padding_mode_t extends Pointer { +public class ConvPaddingMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
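conv_padding_mode_t is renamed to ConvPaddingMode on the same pattern, a variant over kZeros, kReflect, kReplicate and kCircular. Unlike the padding variants it is default-constructible and, as the continuation of this hunk shows, offers one single-argument constructor per alternative:

    ConvPaddingMode mode = new ConvPaddingMode(new kReflect()); // one-arg constructor per alternative
    options.padding_mode().put(new kCircular());                // or assign in place via the @ValueSetter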
*/ - public conv_padding_mode_t(Pointer p) { super(p); } - public conv_padding_mode_t(kZeros value) { this(); put(value); } - public conv_padding_mode_t(kReflect value) { this(); put(value); } - public conv_padding_mode_t(kReplicate value) { this(); put(value); } - public conv_padding_mode_t(kCircular value) { this(); put(value); } - public conv_padding_mode_t() { allocate(); } + public ConvPaddingMode(Pointer p) { super(p); } + public ConvPaddingMode(kZeros value) { this(); put(value); } + public ConvPaddingMode(kReflect value) { this(); put(value); } + public ConvPaddingMode(kReplicate value) { this(); put(value); } + public ConvPaddingMode(kCircular value) { this(); put(value); } + public ConvPaddingMode() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef conv_padding_mode_t put(@ByRef conv_padding_mode_t x); + public native @Name("operator =") @ByRef ConvPaddingMode put(@ByRef ConvPaddingMode x); public @ByRef kZeros get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kZeros get0(@ByRef conv_padding_mode_t container); - @ValueSetter public native conv_padding_mode_t put(@ByRef kZeros value); + @Namespace @Name("c10::get<0>") public static native @ByRef kZeros get0(@ByRef ConvPaddingMode container); + @ValueSetter public native ConvPaddingMode put(@ByRef kZeros value); public @ByRef kReflect get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kReflect get1(@ByRef conv_padding_mode_t container); - @ValueSetter public native conv_padding_mode_t put(@ByRef kReflect value); + @Namespace @Name("c10::get<1>") public static native @ByRef kReflect get1(@ByRef ConvPaddingMode container); + @ValueSetter public native ConvPaddingMode put(@ByRef kReflect value); public @ByRef kReplicate get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kReplicate get2(@ByRef conv_padding_mode_t container); - @ValueSetter public native conv_padding_mode_t put(@ByRef kReplicate value); + @Namespace @Name("c10::get<2>") public static native @ByRef kReplicate get2(@ByRef ConvPaddingMode container); + @ValueSetter public native ConvPaddingMode put(@ByRef kReplicate value); public @ByRef kCircular get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kCircular get3(@ByRef conv_padding_mode_t container); - @ValueSetter public native conv_padding_mode_t put(@ByRef kCircular value); + @Namespace @Name("c10::get<3>") public static native @ByRef kCircular get3(@ByRef ConvPaddingMode container); + @ValueSetter public native ConvPaddingMode put(@ByRef kCircular value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1d.java deleted file mode 100644 index 57ac8428eda..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass 
for {@code ConvTranspose1dImpl}. - * See the documentation for {@code ConvTranspose1dImpl} class to learn what methods - * it provides, and examples of how to use {@code ConvTranspose1d} with - * {@code torch::nn::ConvTranspose1dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConvTranspose1d extends ConvTranspose1dImplModuleHolder { - static { Loader.load(); } - - public ConvTranspose1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ConvTranspose1d(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ConvTranspose1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java index a3f005be3f6..603209552d4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java index 8e2c7c9a931..5230849fdd2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -41,12 +43,12 @@ public ConvTranspose1dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public ConvTranspose1dImpl(@ByVal ConvTranspose1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal ConvTranspose1dOptions options_); + @SharedPtr private native void allocate(@ByVal ConvTranspose1dOptions options_); public 
native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java index 5a97aefc630..fd7c3877547 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,6 +24,10 @@ @Name("torch::nn::ConvTransposeNdImpl<1,torch::nn::ConvTranspose1dImpl>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ConvTranspose1dImplBase extends ConvTranspose1dImplBaseBase { static { Loader.load(); } + + + public ConvTranspose1dImplBase(@ByVal DetailConv1dOptions options_) { super((Pointer)null); allocate(options_); } + private native void allocate(@ByVal DetailConv1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose1dImplBase(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java index 6b70cbe9f0b..0668559692f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java index ed4f0c6ab30..4af16c9fc40 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ConvTranspose1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
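ConvTranspose1dImplBase gains a public constructor from DetailConv1dOptions, and the ConvTranspose1dImpl allocators move to @SharedPtr as well. forward() keeps its optional output_size; its declared nullValue means Java callers can pass null for c10::nullopt. Continuing the sketch (note the explicit array: new LongPointer(4) would resolve to the capacity constructor and allocate four uninitialized longs):

    ConvTranspose1dImpl up = new ConvTranspose1dImpl(16, 8, new LongPointer(new long[] {4}));
    Tensor y = up.forward(randn(1, 16, 50), null); // null output_size -> c10::nullopt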
*/ public ConvTranspose1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ConvTranspose1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplModuleHolder.java deleted file mode 100644 index 6e35ce0056b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConvTranspose1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ConvTranspose1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ConvTranspose1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public ConvTranspose1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ConvTranspose1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ConvTranspose1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ConvTranspose1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java index 563dc8ad7f4..c6b1256b096 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -42,5 +44,5 @@ private native void allocate( public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias(); public native @Cast("torch::ExpandingArray<1>*") @ByRef @NoException(true) LongPointer dilation(); - public native @ByRef @NoException(true) conv_padding_mode_t padding_mode(); + public native @ByRef @NoException(true) ConvPaddingMode padding_mode(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2d.java deleted file mode 100644 index 1df83088c7c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static 
org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ConvTranspose2dImpl}. - * See the documentation for {@code ConvTranspose2dImpl} class to learn what methods - * it provides, and examples of how to use {@code ConvTranspose2d} with - * {@code torch::nn::ConvTranspose2dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConvTranspose2d extends ConvTranspose2dImplModuleHolder { - static { Loader.load(); } - - public ConvTranspose2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ConvTranspose2d(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ConvTranspose2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java index 82c62e2b23a..496ab9b1974 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java index df2e4162085..600b7daa48a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -41,12 +43,12 @@ public ConvTranspose2dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public ConvTranspose2dImpl(@ByVal ConvTranspose2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void 
allocate(@ByVal ConvTranspose2dOptions options_); + @SharedPtr private native void allocate(@ByVal ConvTranspose2dOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java index a96ccb44cc0..94183c0acda 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -18,6 +20,10 @@ @Name("torch::nn::ConvTransposeNdImpl<2,torch::nn::ConvTranspose2dImpl>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ConvTranspose2dImplBase extends ConvTranspose2dImplBaseBase { static { Loader.load(); } + + + public ConvTranspose2dImplBase(@ByVal DetailConv2dOptions options_) { super((Pointer)null); allocate(options_); } + private native void allocate(@ByVal DetailConv2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose2dImplBase(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java index 7b4180ca673..bd712c2287e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java index 033782a0efd..a4c094051c8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ConvTranspose2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
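With @NoDeallocator replaced by @SharedPtr throughout, modules constructed from Java are owned by a shared_ptr from the start (the ownership the deleted ModuleHolder wrappers used to provide), so ordinary JavaCPP lifetime tools apply to them. A sketch assuming standard PointerScope semantics, not anything specific to this patch:

    import org.bytedeco.javacpp.PointerScope;

    try (PointerScope scope = new PointerScope()) {
        ConvTranspose2dImpl tconv = new ConvTranspose2dImpl(8, 4, new LongPointer(3, 3));
        Tensor out = tconv.forward(randn(1, 8, 10, 10), null);
    } // closing the scope deallocates tconv, releasing the underlying shared_ptr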
*/ public ConvTranspose2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ConvTranspose2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplModuleHolder.java deleted file mode 100644 index 7aa2f5646a3..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConvTranspose2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ConvTranspose2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ConvTranspose2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public ConvTranspose2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ConvTranspose2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ConvTranspose2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ConvTranspose2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dOptions.java index a2e1567d06b..3ee213a0a7c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -40,5 +42,5 @@ private native void allocate( public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias(); public native @Cast("torch::ExpandingArray<2>*") @ByRef @NoException(true) LongPointer dilation(); - public native @ByRef @NoException(true) conv_padding_mode_t padding_mode(); + public native @ByRef @NoException(true) ConvPaddingMode padding_mode(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3d.java deleted file mode 100644 index 44d611dc849..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static 
org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ConvTranspose3dImpl}. - * See the documentation for {@code ConvTranspose3dImpl} class to learn what methods - * it provides, and examples of how to use {@code ConvTranspose3d} with - * {@code torch::nn::ConvTranspose3dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConvTranspose3d extends ConvTranspose3dImplModuleHolder { - static { Loader.load(); } - - public ConvTranspose3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ConvTranspose3d(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ConvTranspose3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dFuncOptions.java index fbd79b521f4..441c3c3c88a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java index e3fc64494a3..e1aaefd843c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -41,12 +43,12 @@ public ConvTranspose3dImpl( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(input_channels, output_channels, kernel_size); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @Cast("int64_t") long input_channels, @Cast("int64_t") long output_channels, @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public ConvTranspose3dImpl(@ByVal ConvTranspose3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void 
allocate(@ByVal ConvTranspose3dOptions options_); + @SharedPtr private native void allocate(@ByVal ConvTranspose3dOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java index 5ef806a2f37..52c64d0639e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -18,6 +20,10 @@ @Name("torch::nn::ConvTransposeNdImpl<3,torch::nn::ConvTranspose3dImpl>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ConvTranspose3dImplBase extends ConvTranspose3dImplBaseBase { static { Loader.load(); } + + + public ConvTranspose3dImplBase(@ByVal DetailConv3dOptions options_) { super((Pointer)null); allocate(options_); } + private native void allocate(@ByVal DetailConv3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ConvTranspose3dImplBase(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java index 3b72de7da1b..362c4a65668 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplBaseBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java index c17d1cdc060..27e987e1ac7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ConvTranspose3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
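Because the allocators above switch from `@NoDeallocator` to `@SharedPtr`, a module instantiated from Java is owned by a `std::shared_ptr` from construction, matching how `torch::nn` stores submodules, instead of being left unmanaged. A sketch under that assumption: the `randn` varargs overload from `global.torch` and the tensor shape are assumptions, while the two-argument `forward` with a nullable `output_size` (mapping to the `c10::nullopt` default) is the signature shown above.

```java
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.*;

import static org.bytedeco.pytorch.global.torch.randn;

public class DeconvSketch {
    public static void main(String[] args) {
        // 8 -> 4 channels with a 3x3x3 kernel (illustrative values).
        ConvTranspose3dImpl deconv =
                new ConvTranspose3dImpl(8, 4, new LongPointer(3, 3, 3));

        Tensor x = randn(2, 8, 10, 10, 10); // assumed factory overload
        // Passing null for output_size selects the c10::nullopt default.
        Tensor y = deconv.forward(x, null);
    }
}
```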
*/ public ConvTranspose3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ConvTranspose3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ConvTranspose3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplModuleHolder.java deleted file mode 100644 index be0a9168980..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ConvTranspose3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ConvTranspose3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ConvTranspose3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public ConvTranspose3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ConvTranspose3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ConvTranspose3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ConvTranspose3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dOptions.java index 3d4cc44764c..6a6878580b9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -40,5 +42,5 @@ private native void allocate( public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias(); public native @Cast("torch::ExpandingArray<3>*") @ByRef @NoException(true) LongPointer dilation(); - public native @ByRef @NoException(true) conv_padding_mode_t padding_mode(); + public native @ByRef @NoException(true) ConvPaddingMode padding_mode(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CopyBytesFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CopyBytesFunction.java index 422a8876ef8..f6f6de80c36 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CopyBytesFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CopyBytesFunction.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import 
org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLoss.java deleted file mode 100644 index d0e05e3de9b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code CosineEmbeddingLossImpl}. - * See the documentation for {@code CosineEmbeddingLossImpl} class to learn what - * methods it provides, and examples of how to use {@code CosineEmbeddingLoss} with - * {@code torch::nn::CosineEmbeddingLossOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CosineEmbeddingLoss extends CosineEmbeddingLossImplModuleHolder { - static { Loader.load(); } - - public CosineEmbeddingLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public CosineEmbeddingLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public CosineEmbeddingLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java index e2781aa16f9..90300641499 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -50,9 +52,9 @@ public class CosineEmbeddingLossImpl extends CosineEmbeddingLossImplCloneable { } public CosineEmbeddingLossImpl(@ByVal(nullValue = "torch::nn::CosineEmbeddingLossOptions{}") CosineEmbeddingLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::CosineEmbeddingLossOptions{}") CosineEmbeddingLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::CosineEmbeddingLossOptions{}") CosineEmbeddingLossOptions options_); public CosineEmbeddingLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java index 1f07ca5925d..c7eb4c304f5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class CosineEmbeddingLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CosineEmbeddingLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CosineEmbeddingLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(CosineEmbeddingLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
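The deleted `CosineEmbeddingLoss` class above follows the pattern of every `ModuleHolder` front class removed in this patch: callers now use the `*Impl` class directly, which the regenerated `@SharedPtr` allocators make safe to share with native code. A before/after sketch; the old calls are quoted from the deleted holder, and the replacement relies only on the `CosineEmbeddingLossImpl` constructors shown in the hunk above.

```java
import org.bytedeco.pytorch.*;

public class HolderMigrationSketch {
    public static void main(String[] args) {
        // Before this patch (holder API, now deleted):
        //   CosineEmbeddingLoss holder =
        //           new CosineEmbeddingLoss(new CosineEmbeddingLossImpl());
        //   CosineEmbeddingLossImpl impl = holder.get();

        // After this patch: the Impl class is used directly and is
        // shared_ptr-managed from construction (default or explicit options).
        CosineEmbeddingLossImpl loss = new CosineEmbeddingLossImpl();
    }
}
```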
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplModuleHolder.java deleted file mode 100644 index 015a5fadd0c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CosineEmbeddingLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CosineEmbeddingLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public CosineEmbeddingLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public CosineEmbeddingLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") CosineEmbeddingLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") CosineEmbeddingLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native CosineEmbeddingLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossOptions.java index a5d01c25106..68bc51b4307 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -43,5 +45,5 @@ public class CosineEmbeddingLossOptions extends Pointer { } public native @ByRef @NoException(true) DoublePointer margin(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarity.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarity.java deleted file mode 100644 index 9117d351c7c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarity.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code CosineSimilarityImpl}. - * See the documentation for {@code CosineSimilarityImpl} class to learn what methods - * it provides, and examples of how to use {@code CosineSimilarity} with - * {@code torch::nn::CosineSimilarityOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. 
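Alongside the holder removal, the options hunk above renames the Java wrapper for the `c10::variant` reduction type: `reduction()` now returns `LossReduction` instead of `loss_reduction_t`. A short sketch using only the accessors shown in the hunk; the no-argument options constructor is an assumption based on the `torch::nn::CosineEmbeddingLossOptions{}` default visible in the Impl constructor.

```java
import org.bytedeco.pytorch.*;

public class LossOptionsSketch {
    public static void main(String[] args) {
        CosineEmbeddingLossOptions options =
                new CosineEmbeddingLossOptions();           // assumed default ctor
        options.margin().put(0.5);                          // margin() is a @ByRef DoublePointer
        LossReduction reduction = options.reduction();      // renamed from loss_reduction_t
    }
}
```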
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CosineSimilarity extends CosineSimilarityImplModuleHolder { - static { Loader.load(); } - - public CosineSimilarity(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public CosineSimilarity(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CosineSimilarity(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java index 9fd9eb2bd37..3e69b1f49e4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,9 +46,9 @@ public class CosineSimilarityImpl extends CosineSimilarityImplCloneable { } public CosineSimilarityImpl(@Const @ByRef(nullValue = "torch::nn::CosineSimilarityOptions{}") CosineSimilarityOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::CosineSimilarityOptions{}") CosineSimilarityOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::CosineSimilarityOptions{}") CosineSimilarityOptions options_); public CosineSimilarityImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java index 54cb61fff0a..b0b8f8bf895 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class CosineSimilarityImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public CosineSimilarityImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CosineSimilarityImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(CosineSimilarityImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplModuleHolder.java deleted file mode 100644 index 0c564e374b9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CosineSimilarityImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CosineSimilarityImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public CosineSimilarityImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public CosineSimilarityImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") CosineSimilarityImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") CosineSimilarityImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native CosineSimilarityImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityOptions.java index f8afd91772b..7ba0a2071ed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CppFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CppFunction.java new file mode 100644 index 00000000000..2d4e8558404 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CppFunction.java @@ -0,0 +1,104 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** Represents a C++ function that implements an operator. Most users won't + * interact directly with this class, except via error messages: the + * constructors this function define the set of permissible "function"-like + * things you can bind via the interface. 
+ * + * This class erases the type of the passed in function, but durably records + * the type via an inferred schema for the function. */ +@Namespace("torch") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class CppFunction extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public CppFunction(Pointer p) { super(p); } + + /** This overload accepts function pointers, e.g., {@code CppFunction(&add_impl)} */ + + /** This overload accepts compile time function pointers, e.g., + * {@code CppFunction(TORCH_FN(add_impl))} */ + + /** This overload accepts lambdas, e.g., {@code CppFunction([](const Tensor& self) { + * ... })} */ + +// #if defined C10_MOBILE + /** This overload accepts function pointers, e.g., {@code CppFunction(&add_impl, + * NoInferSchemaTag())} */ + + /** This overload accepts compile time function pointers, e.g., + * {@code CppFunction(TORCH_FN(add_impl), NoInferSchemaTag())} */ + + /** This overload accepts lambdas, e.g., {@code CppFunction([](const Tensor& self) { + * ... }, NoInferSchemaTag())} */ +// #endif + + public CppFunction(@ByRef(true) CppFunction arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByRef(true) CppFunction arg0); + + public native @ByRef @Name("operator =") CppFunction put(@ByRef(true) CppFunction arg0); + + /** \private + * Creates a function from a type-erased boxed kernel. */ + + /** This creates a fallthrough function. Fallthrough functions + * immediately redispatch to the next available dispatch key, + * but are implemented more efficiently than a hand written + * function done in the same way. */ + + /// + public static native @ByVal CppFunction makeFallthrough(); + + /** \private + * + * Creates a function that raises an error saying that named tensors + * are not supported when called. */ + public static native @ByVal CppFunction makeNamedNotSupported(); + + /** Create a function from a boxed kernel function with signature + * {@code void(const OperatorHandle&, Stack*)}; i.e., they receive a + * stack of arguments in a boxed calling convention, rather than + * in the native C++ calling convention. Boxed functions are + * typically only used to register backend fallbacks via + * torch::Library::fallback(). */ + + // Variant that takes in a boxed kernel function with a plumbed + // DispatchKeySet. See Note [Plumbing Keys Through The Dispatcher] for + // details. + + /** Create a function from a boxed kernel functor which defines + * {@code operator()(const OperatorHandle&, DispatchKeySet, Stack*)} + * (receiving arguments from boxed calling convention) and inherits + * from {@code c10::OperatorKernel}. Unlike makeFromBoxedFunction, functions + * registered in this way can also carry additional state which + * is managed by the functor; this is useful if you're writing an + * adapter to some other implementation, e.g., a Python callable, which + * is dynamically associated with the registered kernel. */ + + /** Create a function from an unboxed kernel function. + * This is typically used to register common operators. */ + + /** Create a function from a compile time unboxed kernel function pointer. + * This is typically used to register common operators. + * Compile time function pointers can be used to allow the compiler + * to optimize (e.g. inline) calls to it. 
*/ + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignature.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignature.java index e94575b6997..c8aac6553fd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignature.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignature.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -30,5 +32,6 @@ public class CppSignature extends Pointer { public native @StdString BytePointer name(); - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef CppSignature lhs, @Const @ByRef CppSignature rhs); + public boolean equals(CppSignature rhs) { return equals(this, rhs); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignatureOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignatureOptional.java index d6cd9175549..bdc1f881a46 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignatureOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CppSignatureOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class CppSignatureOptional extends Pointer { public native @Name("operator =") @ByRef CppSignatureOptional put(@ByRef CppSignatureOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef CppSignature get(); @ValueSetter public native CppSignatureOptional put(@ByRef CppSignature value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLoss.java deleted file mode 100644 index d0b8236011e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code CrossEntropyLossImpl}. - * See the documentation for {@code CrossEntropyLossImpl} class to learn what methods - * it provides, and examples of how to use {@code CrossEntropyLoss} with - * {@code torch::nn::CrossEntropyLossOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. 
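The new `CppFunction` binding above exposes only the non-templated surface of `torch::CppFunction`, i.e. its move constructor/assignment and the `makeFallthrough()`/`makeNamedNotSupported()` factories, since the templated kernel constructors cannot be instantiated generically; operator registration itself still happens in C++. Together with the `operator ==` mapping on `CppSignature` and the new `reset()` on `CppSignatureOptional`, a plausible interaction looks like the sketch below (the optional's no-argument constructor is an assumption; the rest is shown in the hunks).

```java
import org.bytedeco.pytorch.*;

public class DispatchSketch {
    public static void main(String[] args) {
        // Factory methods bound from the new CppFunction class.
        CppFunction fallthrough = CppFunction.makeFallthrough();
        CppFunction namedNotSupported = CppFunction.makeNamedNotSupported();

        // CppSignatureOptional gained reset() to clear its contained value.
        CppSignatureOptional maybeSig = new CppSignatureOptional(); // assumed default ctor
        maybeSig.reset();
        boolean empty = !maybeSig.has_value();

        // Given two CppSignature instances a and b (obtained natively),
        // operator== is now reachable from Java as: a.equals(b)
    }
}
```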
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CrossEntropyLoss extends CrossEntropyLossImplModuleHolder { - static { Loader.load(); } - - public CrossEntropyLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public CrossEntropyLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CrossEntropyLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java index ece7b6ab4c4..dc200e5cfd1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -48,9 +50,9 @@ public class CrossEntropyLossImpl extends CrossEntropyLossImplCloneable { } public CrossEntropyLossImpl(@ByVal(nullValue = "torch::nn::CrossEntropyLossOptions{}") CrossEntropyLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::CrossEntropyLossOptions{}") CrossEntropyLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::CrossEntropyLossOptions{}") CrossEntropyLossOptions options_); public CrossEntropyLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java index b5afe2b18ae..4e7caf7d2b8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class CrossEntropyLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public CrossEntropyLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CrossEntropyLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(CrossEntropyLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplModuleHolder.java deleted file mode 100644 index 5b7c788eb9e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CrossEntropyLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CrossEntropyLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public CrossEntropyLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
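`CrossEntropyLossImpl` gets the same `@SharedPtr` allocation treatment, and its options class (see the `CrossEntropyLossOptions` hunk just below) keeps the `weight`, `ignore_index`, `reduction`, and `label_smoothing` accessors, with `reduction()` retyped to `LossReduction`. A sketch with illustrative values; the no-argument options constructor is assumed, while the accessors and the options-taking Impl constructor are shown in the surrounding hunks.

```java
import org.bytedeco.pytorch.*;

public class CrossEntropySketch {
    public static void main(String[] args) {
        CrossEntropyLossOptions options =
                new CrossEntropyLossOptions();  // assumed default ctor
        options.ignore_index().put(-100);       // @ByRef LongPointer accessor
        options.label_smoothing().put(0.1);     // @ByRef DoublePointer accessor

        CrossEntropyLossImpl loss = new CrossEntropyLossImpl(options);
    }
}
```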
*/ - /* implicit */ public CrossEntropyLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") CrossEntropyLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") CrossEntropyLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native CrossEntropyLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossOptions.java index 83bd3f923b2..6c29876afcf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,6 +47,6 @@ public class CrossEntropyLossOptions extends Pointer { public native @ByRef @NoException(true) Tensor weight(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer ignore_index(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); public native @ByRef @NoException(true) DoublePointer label_smoothing(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2d.java deleted file mode 100644 index a01bcbe318f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static 
org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code CrossMapLRN2dImpl}. - * See the documentation for {@code CrossMapLRN2dImpl} class to learn what methods it - * provides, and examples of how to use {@code CrossMapLRN2d} with - * {@code torch::nn::CrossMapLRN2dOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CrossMapLRN2d extends CrossMapLRN2dImplModuleHolder { - static { Loader.load(); } - - public CrossMapLRN2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public CrossMapLRN2d(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CrossMapLRN2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java index 98a0a4ebf13..da3d6e50792 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,9 +34,9 @@ public class CrossMapLRN2dImpl extends CrossMapLRN2dImplCloneable { public CrossMapLRN2dImpl(Pointer p) { super(p); } public CrossMapLRN2dImpl(@Cast("int64_t") long size) { super((Pointer)null); allocate(size); } - @NoDeallocator private native void allocate(@Cast("int64_t") long size); + @SharedPtr private native void allocate(@Cast("int64_t") long size); public CrossMapLRN2dImpl(@Const @ByRef CrossMapLRN2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef CrossMapLRN2dOptions options_); + @SharedPtr private native void allocate(@Const @ByRef CrossMapLRN2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java index e46f0e63686..3e4905298ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import 
org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class CrossMapLRN2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public CrossMapLRN2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr CrossMapLRN2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(CrossMapLRN2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplModuleHolder.java deleted file mode 100644 index 52a74954061..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class CrossMapLRN2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CrossMapLRN2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public CrossMapLRN2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type.
- * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public CrossMapLRN2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") CrossMapLRN2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") CrossMapLRN2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native CrossMapLRN2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dOptions.java index db8131be8f1..007ff6d9154 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CustomBatchRequest.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CustomBatchRequest.java index e39ac75f163..4d0d8743fac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CustomBatchRequest.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CustomBatchRequest.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CustomClassHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CustomClassHolder.java index b363088b800..2f563d05e29 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CustomClassHolder.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CustomClassHolder.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 
1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DDPLoggingData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DDPLoggingData.java index ed7dca634c0..6b17aefe1cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DDPLoggingData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DDPLoggingData.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -40,7 +42,7 @@ public class DDPLoggingData extends Pointer { } // logging fields that are string types. - public native @ByRef StringStringMap strs_map(); public native DDPLoggingData strs_map(StringStringMap setter); + public native @ByRef @NoOffset StringStringMap strs_map(); public native DDPLoggingData strs_map(StringStringMap setter); // logging fields that are int64_t types. - public native @ByRef StringLongMap ints_map(); public native DDPLoggingData ints_map(StringLongMap setter); + public native @ByRef @NoOffset StringLongMap ints_map(); public native DDPLoggingData ints_map(StringLongMap setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DataLoaderOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DataLoaderOptions.java index 02157a82d93..6f313c5a03d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DataLoaderOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DataLoaderOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java index 18ad6ca11d5..88ca2be01a9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DataPtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,8 +46,8 @@ public class DataPtr extends Pointer { private native void allocate(); public DataPtr(Pointer data, @ByVal Device device) { super((Pointer)null); 
allocate(data, device); } private native void allocate(Pointer data, @ByVal Device device); - public DataPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Deleter ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); } - private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Deleter ctx_deleter, @ByVal Device device); + public DataPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") PointerConsumer ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); } + private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") PointerConsumer ctx_deleter, @ByVal Device device); public DataPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Pointer ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); } private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Pointer ctx_deleter, @ByVal Device device); public DataPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") long ctx_deleter, @ByVal Device device) { super((Pointer)null); allocate(data, ctx, ctx_deleter, device); } @@ -57,7 +59,7 @@ public class DataPtr extends Pointer { public native Pointer release_context(); public native @Cast("bool") @Name("operator bool") boolean asBoolean(); - public native @Cast("c10::DeleterFnPtr") Deleter get_deleter(); + public native @Cast("c10::DeleterFnPtr") PointerConsumer get_deleter(); /** * Compare the deleter in a DataPtr to expected_deleter. * If it matches, replace the deleter with new_deleter @@ -96,8 +98,8 @@ public class DataPtr extends Pointer { * in question to confirm this. */ public native @Cast("bool") boolean compare_exchange_deleter( - @Cast("c10::DeleterFnPtr") Deleter expected_deleter, - @Cast("c10::DeleterFnPtr") Deleter new_deleter); + @Cast("c10::DeleterFnPtr") PointerConsumer expected_deleter, + @Cast("c10::DeleterFnPtr") PointerConsumer new_deleter); public native @Cast("bool") boolean compare_exchange_deleter( @Cast("c10::DeleterFnPtr") Pointer expected_deleter, @Cast("c10::DeleterFnPtr") Pointer new_deleter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoBase.java index 288ac6265c8..a2381d828ca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoGuard.java index 5ef88212d67..72938345511 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DebugInfoGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Decl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Decl.java index 9d000bc3830..90b336edc82 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Decl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Decl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,8 +25,15 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Decl extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Decl(Pointer p) { super(p); } - public Decl(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Decl(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal ParamList params(); public native @ByVal ExprMaybe return_type(); + public static native @ByVal Decl create( + @Const @ByRef SourceRange range, + @Const @ByRef ParamList params, + @Const @ByRef ExprMaybe return_type); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Def.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Def.java index dcae0615860..5950a7a0322 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Def.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Def.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,12 +21,20 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Def extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
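A note on the DataPtr hunk above: the ctx_deleter argument and the get_deleter() result are now typed as PointerConsumer instead of the former Deleter class. The sketch below shows how a custom deleter might be installed from Java. It assumes PointerConsumer follows the usual JavaCPP callback pattern (subclass the FunctionPointer and override call(Pointer)) and that Device keeps a string-parsing constructor; neither is shown in this patch, so treat both as assumptions.

    import org.bytedeco.javacpp.Pointer;
    import org.bytedeco.pytorch.DataPtr;
    import org.bytedeco.pytorch.Device;
    import org.bytedeco.pytorch.functions.PointerConsumer;

    public class DataPtrDeleterSketch {
        public static void main(String[] args) {
            // Deleter invoked when the DataPtr releases its context; freeing
            // is only valid here because the buffer comes from Pointer.malloc().
            PointerConsumer deleter = new PointerConsumer() {
                @Override public void call(Pointer ctx) {
                    Pointer.free(ctx);
                }
            };
            Pointer buffer = Pointer.malloc(1024);
            // Four-argument constructor from the hunk above:
            // data, ctx, ctx_deleter (cast to c10::DeleterFnPtr), device.
            DataPtr dp = new DataPtr(buffer, buffer, deleter, new Device("cpu"));
            // dp.get_deleter() now reports the installed PointerConsumer, and
            // dp.compare_exchange_deleter(...) can swap it as documented above.
        }
    }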
*/ + public Def(Pointer p) { super(p); } - public Def(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Def(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Def withName(@StdString BytePointer new_name); public native @ByVal Def withName(@StdString String new_name); public native @ByVal Def withDecl(@Const @ByRef Decl decl); public native @ByVal Ident name(); public native @ByVal Decl decl(); + public native @ByVal StmtList statements(); + public static native @ByVal Def create( + @Const @ByRef SourceRange range, + @Const @ByRef Ident name, + @Const @ByRef Decl decl, + @Const @ByRef StmtList stmts); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DefMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DefMaybe.java index b52bcd36812..0930d3140f2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DefMaybe.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DefMaybe.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Name("torch::jit::Maybe") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class DefMaybe extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
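The Def hunk above replaces the untyped TreeRef-pointer constructor with a typed TreeRef overload and adds the statements() accessor plus a static create() factory. A minimal sketch using only those members follows; the range() accessor is assumed to be inherited from TreeView, and the input Def is assumed to come from the JIT parser elsewhere.

    import org.bytedeco.pytorch.Decl;
    import org.bytedeco.pytorch.Def;
    import org.bytedeco.pytorch.StmtList;

    public class DefSketch {
        // Rename a parsed function definition and rebuild an equivalent
        // node through the new static factory.
        static Def rename(Def def) {
            Def renamed = def.withName("forward2"); // returns a new node (@ByVal)
            Decl decl = renamed.decl();
            StmtList body = renamed.statements();   // accessor added in this hunk
            // range() is assumed from TreeView, not shown in this hunk.
            return Def.create(renamed.range(), renamed.name(), decl, body);
        }
    }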
*/ + public DefMaybe(Pointer p) { super(p); } - public DefMaybe(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public DefMaybe(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); /* implicit */ public DefMaybe(@Const @ByRef Def tree) { super((Pointer)null); allocate(tree); } private native void allocate(@Const @ByRef Def tree); public native @Cast("bool") boolean present(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DefVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DefVector.java index 3609ae5de31..041ce3d70a5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DefVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DefVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,8 @@ public class DefVector extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public Def front() { return get(0); } + public Def back() { return get(size() - 1); } @Index(function = "at") public native @ByRef Def get(@Cast("size_t") long i); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Delete.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Delete.java index 737bbaef496..cd49137b987 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Delete.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Delete.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,7 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Delete extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
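The DefVector hunk above adds front() and back() conveniences on top of the existing empty(), size(), and get(). A short usage sketch restricted to those members:

    import org.bytedeco.pytorch.Def;
    import org.bytedeco.pytorch.DefVector;

    public class DefVectorSketch {
        static void dump(DefVector defs) {
            if (defs.empty()) {
                return;
            }
            Def first = defs.front();        // added here: shorthand for get(0)
            Def last = defs.back();          // added here: get(size() - 1)
            System.out.println("defs: " + defs.size());
            for (long i = 0; i < defs.size(); i++) {
                Def d = defs.get(i);         // @Index-based element access
                // inspect d here
            }
        }
    }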
*/ + public Delete(Pointer p) { super(p); } - public Delete(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Delete(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal ExprList targets(); + public static native @ByVal Delete create(@Const @ByRef SourceRange range, @Const @ByRef ExprList targets); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/warn_fn_type.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeleterFnPtr.java similarity index 66% rename from pytorch/src/gen/java/org/bytedeco/pytorch/warn_fn_type.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/DeleterFnPtr.java index b052d98f73a..eb998102820 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/warn_fn_type.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeleterFnPtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -17,11 +19,11 @@ @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class warn_fn_type extends FunctionPointer { +public class DeleterFnPtr extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public warn_fn_type(Pointer p) { super(p); } - protected warn_fn_type() { allocate(); } + public DeleterFnPtr(Pointer p) { super(p); } + protected DeleterFnPtr() { allocate(); } private native void allocate(); - public native void call(@StdString BytePointer msg); + public native void call(Pointer arg0); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java index 80eb0311d0b..67d3a7dc2a0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeserializationStorageContext.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv1dOptions.java index af791435c9f..2fa6ca9415a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,11 +37,11 @@ private native void allocate( public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer out_channels(); public native @Cast("torch::ExpandingArray<1>*") @ByRef @NoException(true) LongPointer kernel_size(); public native @Cast("torch::ExpandingArray<1>*") @ByRef @NoException(true) LongPointer stride(); - public native @ByRef @NoException(true) conv_padding_t1 padding(); + public native @ByRef @NoException(true) Conv1dPadding padding(); public native @Cast("torch::ExpandingArray<1>*") @ByRef @NoException(true) LongPointer dilation(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer transposed(); public native @Cast("torch::ExpandingArray<1>*") @ByRef @NoException(true) LongPointer output_padding(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias(); - public native @ByRef @NoException(true) conv_padding_mode_t padding_mode(); + public native @ByRef @NoException(true) ConvPaddingMode padding_mode(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv2dOptions.java index 11f288af9ce..e633bfbb185 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,11 +35,11 @@ private native void allocate( public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer out_channels(); public native @Cast("torch::ExpandingArray<2>*") @ByRef @NoException(true) LongPointer kernel_size(); public native @Cast("torch::ExpandingArray<2>*") @ByRef @NoException(true) LongPointer stride(); - public native @ByRef @NoException(true) conv_padding_t2 padding(); + public native @ByRef @NoException(true) Conv2dPadding padding(); public native @Cast("torch::ExpandingArray<2>*") @ByRef @NoException(true) LongPointer dilation(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer transposed(); public native @Cast("torch::ExpandingArray<2>*") @ByRef @NoException(true) LongPointer output_padding(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias(); - public native @ByRef @NoException(true) conv_padding_mode_t padding_mode(); + public native @ByRef @NoException(true) ConvPaddingMode padding_mode(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv3dOptions.java index 5e51bdcdad9..441ac21f8b6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DetailConv3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS 
FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,11 +35,11 @@ private native void allocate( public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer out_channels(); public native @Cast("torch::ExpandingArray<3>*") @ByRef @NoException(true) LongPointer kernel_size(); public native @Cast("torch::ExpandingArray<3>*") @ByRef @NoException(true) LongPointer stride(); - public native @ByRef @NoException(true) conv_padding_t3 padding(); + public native @ByRef @NoException(true) Conv3dPadding padding(); public native @Cast("torch::ExpandingArray<3>*") @ByRef @NoException(true) LongPointer dilation(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer transposed(); public native @Cast("torch::ExpandingArray<3>*") @ByRef @NoException(true) LongPointer output_padding(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer groups(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias(); - public native @ByRef @NoException(true) conv_padding_mode_t padding_mode(); + public native @ByRef @NoException(true) ConvPaddingMode padding_mode(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DetectAnomalyGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DetectAnomalyGuard.java index d9dfe01a8a7..487ca543cac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DetectAnomalyGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DetectAnomalyGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java index 6ad2b88d51d..0a895e88832 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Device.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -56,11 +58,11 @@ public class Device extends Pointer { /** Returns true if the type and index of this {@code Device} matches that of * {@code other}. */ - + public native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByRef Device other); /** Returns true if the type or index of this {@code Device} differs from that of * {@code other}. */ - + public native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@Const @ByRef Device other); /** Sets the device index. 
*/ public native void set_index(@Cast("c10::DeviceIndex") byte index); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuard.java deleted file mode 100644 index c825ea67b01..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuard.java +++ /dev/null @@ -1,85 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** RAII guard that sets a certain default device in its constructor, and - * changes it back to the device that was originally active upon destruction. - * - * The device is always reset to the one that was active at the time of - * construction of the guard. Even if you {@code set_device} after construction, the - * destructor will still reset the device to the one that was active at - * construction time. - * - * This device guard does NOT have an uninitialized state; it is guaranteed - * to reset a device on exit. If you are in a situation where you *might* - * want to setup a guard (i.e., are looking for the moral equivalent - * of optional), see OptionalDeviceGuard. */ -@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DeviceGuard extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DeviceGuard(Pointer p) { super(p); } - - /** No default constructor; see Note [Omitted default constructor from RAII] */ - - - /** Set the current device to the passed Device. */ - public DeviceGuard(@ByVal Device device) { super((Pointer)null); allocate(device); } - private native void allocate(@ByVal Device device); - - /** This constructor is for testing only. */ - public DeviceGuard( - @ByVal Device device, - @Cast("const c10::impl::DeviceGuardImplInterface*") Pointer impl) { super((Pointer)null); allocate(device, impl); } - private native void allocate( - @ByVal Device device, - @Cast("const c10::impl::DeviceGuardImplInterface*") Pointer impl); - - /** Copy is disallowed */ - - - - /** Move is disallowed, as DeviceGuard does not have an uninitialized state, - * which is required for moves on types with nontrivial destructors. */ - - - - /** Sets the device to the given one. The specified device must be consistent - * with the device type originally specified during guard construction. - * - * TODO: The consistency check here is inconsistent with StreamGuard's - * behavior with set_stream, where a stream on a different device than - * the original one isn't an error; we just reset the stream and then - * switch devices. */ - public native void reset_device(@ByVal Device device); - - /** This method is for testing only. */ - public native void reset_device( - @ByVal Device device, - @Cast("const c10::impl::DeviceGuardImplInterface*") Pointer impl); - - /** Sets the device index to the given one. The device type is inferred - * from the original device type the guard was constructed with. 
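The Device hunk above turns the previously comment-only operator== and operator!= into callable equals() and notEquals(). A short sketch; the string-parsing Device constructor is assumed from these presets rather than shown in this hunk.

    import org.bytedeco.pytorch.Device;

    public class DeviceSketch {
        public static void main(String[] args) {
            Device a = new Device("cuda:0");         // assumed ctor
            Device b = new Device("cuda:1");
            System.out.println(a.equals(b));         // operator==: type and index
            System.out.println(a.notEquals(b));      // operator!=
            b.set_index((byte) 0);                   // DeviceIndex is a byte here
            System.out.println(a.equals(b));         // now true: same type, same index
        }
    }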
*/ - public native void set_index(@Cast("c10::DeviceIndex") byte index); - - /** Returns the device that was set at the time the guard was constructed. */ - public native @ByVal Device original_device(); - - /** Returns the most recent device that was set using this device guard, - * either from construction, or via set_device. */ - public native @ByVal Device current_device(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplInterface.java new file mode 100644 index 00000000000..2fdf1dc53df --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplInterface.java @@ -0,0 +1,180 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * DeviceGuardImplInterface represents the virtual interface which provides + * functionality to provide an RAII class for device and stream switching, + * via DeviceGuard. Every distinct device type, e.g., CUDA and HIP, is + * expected to implement and register an implementation of this interface. + * All classes which inherit from DeviceGuardImplInterface should be declared + * 'final'. + * + * This class exists because we provide a unified interface for performing + * device guards via DeviceGuard, but we cannot assume that we have actually + * compiled against the, e.g., CUDA library, which actually implements + * this guard functionality. In this case, a dynamic dispatch is required + * to cross the library boundary. + * + * If possible, you should directly use implementations of this interface; + * those uses will be devirtualized. + */ +@Namespace("c10::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DeviceGuardImplInterface extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DeviceGuardImplInterface(Pointer p) { super(p); } + + /** + * Return the type of device managed by this guard implementation. + */ + public native DeviceType type(); + + /** + * Set the current device to Device, and return the previous Device. + */ + public native @ByVal Device exchangeDevice(@ByVal Device arg0); + // NB: Implementations of exchangeDevice can be a bit boilerplatey. You might + // consider replacing exchangeDevice with a non-virtual function with a baked + // in implementation; however, note that this will triple the number of + // virtual calls (when you implement exchangeDevice in a final subclass, + // the compiler gets to devirtualize everything; it won't do that if you don't + // define it in the subclass!) A common way to solve this problem is to use + // some sort of CRTP; however, we can't template DeviceGuardImplInterface since + // we really *do* need it to be virtual. A little boilerplate seems easiest + // to explain.
(Another way around this problem is to provide inline + // functions that provide the default implementations, but this seems a little + // hard to explain. In any case, we're only going to have on the order of ten + // implementations of this anyway.) + + /** + * Get the current device. + */ + public native @ByVal Device getDevice(); + + /** + * Set the current device to Device. + */ + public native void setDevice(@ByVal Device arg0); + + /** + * Set the current device to Device, without checking for errors + * (so, e.g., this can be called from a destructor). + */ + public native @NoException(true) void uncheckedSetDevice(@ByVal Device arg0); + + /** + * Get the current stream for a given device. + */ + public native @ByVal @NoException(true) Stream getStream(@ByVal Device arg0); + + /** + * Get the default stream for a given device. + */ + public native @ByVal Stream getDefaultStream(@ByVal Device arg0); + + /** + * Get a stream from the global pool for a given device. + */ + public native @ByVal Stream getStreamFromGlobalPool(@ByVal Device arg0, @Cast("bool") boolean isHighPriority/*=false*/); + public native @ByVal Stream getStreamFromGlobalPool(@ByVal Device arg0); + + /** + * Set a stream to be the thread local current stream for its device. + * Return the previous stream for that device. You are NOT required + * to set the current device to match the device of this stream. + */ + public native @ByVal @NoException(true) Stream exchangeStream(@ByVal Stream arg0); + + /** + * Destroys the given event. + */ + public native @NoException(true) void destroyEvent(Pointer arg0, @Cast("const c10::DeviceIndex") byte arg1); + + /** + * Increments the event's version and enqueues a job with this version + * in the stream's work queue. When the stream processes that job, + * it notifies all streams waiting on / blocked by that version of the + * event to continue and marks that version as recorded. + * */ + public native void record( + @Cast("void**") PointerPointer arg0, + @Const @ByRef Stream arg1, + @Cast("const c10::DeviceIndex") byte arg2, + EventFlag arg3); + public native void record( + @Cast("void**") @ByPtrPtr Pointer arg0, + @Const @ByRef Stream arg1, + @Cast("const c10::DeviceIndex") byte arg2, + EventFlag arg3); + public native void record( + @Cast("void**") @ByPtrPtr Pointer arg0, + @Const @ByRef Stream arg1, + @Cast("const c10::DeviceIndex") byte arg2, + @Cast("c10::EventFlag") int arg3); + + /** + * Does nothing if the event has not been scheduled to be recorded. + * If the event was previously enqueued to be recorded, a command + * to wait for the version of the event that exists at the time of this call + * is inserted in the stream's work queue. + * When the stream reaches this command it will stop processing + * additional commands until that version of the event is marked as recorded. + */ + public native void block(Pointer arg0, @Const @ByRef Stream arg1); + + /** + * Returns true if (and only if) + * (1) the event has never been scheduled to be recorded, or + * (2) the current version is marked as recorded. + * Returns false otherwise. + */ + public native @Cast("bool") boolean queryEvent(Pointer arg0); + + /** + * Get the number of devices. WARNING: This is REQUIRED to not raise + * an exception. If there is some sort of problem, e.g., driver error, + * you should report that there are zero available devices.
+ */ + public native @Cast("c10::DeviceIndex") @NoException(true) byte deviceCount(); + + /** + * Return true if all the work previously enqueued on the stream for + * asynchronous execution has completed running on the device. + */ + public native @Cast("bool") boolean queryStream(@Const @ByRef Stream arg0); + + /** + * Wait (by blocking the calling thread) until all the work previously + * enqueued on the stream has completed running on the device. + */ + public native void synchronizeStream(@Const @ByRef Stream arg0); + + /** + * Ensure the caching allocator (if any) is aware that the given DataPtr is + * being used on the given stream, and that it should thus avoid recycling the + * DataPtr until all work on that stream is done. + */ + public native void recordDataPtrOnStream(@Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr arg0, @Const @ByRef Stream arg1); + + /** + * Intended use of this class is to leak the DeviceGuardImpl at program end. + * So you better not call the destructor, buster! + */ +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplRegistrar.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplRegistrar.java new file mode 100644 index 00000000000..9b855bd43ab --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceGuardImplRegistrar.java @@ -0,0 +1,39 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// I can't conveniently use c10/util/Registry.h for the following reason: +// c10/util/Registry.h gives me a slow way of Create'ing a object of some +// interface from the registry, but no way of quickly accessing an already +// created object. I'll be banging on getDeviceGuardImpl every time we do a +// DeviceGuard, so I really don't want to be doing an unordered_map lookup. +// Better if the registration mechanism directly drops its implementation +// into device_guard_impl_registry. + +@Namespace("c10::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DeviceGuardImplRegistrar extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
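A sketch exercising the interface methods declared above. The implementation instance is assumed to come from native code (for example a registered CUDA backend), since the class exposes no way to construct one from Java; every call used below appears in the new file.

    import org.bytedeco.pytorch.Device;
    import org.bytedeco.pytorch.DeviceGuardImplInterface;
    import org.bytedeco.pytorch.Stream;

    public class GuardImplSketch {
        static void drainCurrentStream(DeviceGuardImplInterface impl) {
            if (impl.deviceCount() == 0) {       // contract above: must not throw
                return;
            }
            Device current = impl.getDevice();
            Stream s = impl.getStream(current);  // thread-local current stream
            if (!impl.queryStream(s)) {          // work still pending?
                impl.synchronizeStream(s);       // block until it completes
            }
        }
    }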
*/ + public DeviceGuardImplRegistrar(Pointer p) { super(p); } + + public DeviceGuardImplRegistrar(DeviceType arg0, @Const DeviceGuardImplInterface arg1) { super((Pointer)null); allocate(arg0, arg1); } + private native void allocate(DeviceType arg0, @Const DeviceGuardImplInterface arg1); + public DeviceGuardImplRegistrar(@Cast("c10::DeviceType") byte arg0, @Const DeviceGuardImplInterface arg1) { super((Pointer)null); allocate(arg0, arg1); } + private native void allocate(@Cast("c10::DeviceType") byte arg0, @Const DeviceGuardImplInterface arg1); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceHash.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceHash.java deleted file mode 100644 index 2529c6f5bb6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceHash.java +++ /dev/null @@ -1,37 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - // namespace c10 -@Name("std::hash") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DeviceHash extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public DeviceHash() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public DeviceHash(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
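The registrar above mirrors what C++ code does with the C10_REGISTER_GUARD_IMPL macro: it drops an implementation into device_guard_impl_registry for one device type. A hypothetical registration from Java, assuming the DeviceType enum exposes the usual constants such as CUDA; per the comment above, the implementation is intentionally leaked, so impl must stay alive for the whole process.

    import org.bytedeco.pytorch.DeviceGuardImplInterface;
    import org.bytedeco.pytorch.DeviceGuardImplRegistrar;
    import static org.bytedeco.pytorch.global.torch.*; // assumed home of DeviceType

    public class RegistrarSketch {
        // The registry keeps a raw pointer; never let impl be deallocated.
        static DeviceGuardImplRegistrar register(DeviceGuardImplInterface impl) {
            return new DeviceGuardImplRegistrar(DeviceType.CUDA, impl);
        }
    }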
*/ - public DeviceHash(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public DeviceHash position(long position) { - return (DeviceHash)super.position(position); - } - @Override public DeviceHash getPointer(long i) { - return new DeviceHash((Pointer)this).offsetAddress(i); - } - - public native @Cast("std::size_t") @Name("operator ()") @NoException(true) long apply(@ByVal Device d); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjType.java index a3ba8312d69..50d5dc3a8ee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjTypePtr.java index cf76bfcdbf3..f16322c7677 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceObjTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceOptional.java index 2523ab1620f..828a0c2bd36 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class DeviceOptional extends Pointer { public native @Name("operator =") @ByRef DeviceOptional put(@ByRef DeviceOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Device get(); @ValueSetter public native DeviceOptional put(@ByRef Device value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeHash.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeHash.java deleted file mode 100644 index 8d2d0062e3b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeviceTypeHash.java +++ /dev/null @@ -1,37 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import 
org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - @Name("std::hash") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DeviceTypeHash extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public DeviceTypeHash() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public DeviceTypeHash(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DeviceTypeHash(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public DeviceTypeHash position(long position) { - return (DeviceTypeHash)super.position(position); - } - @Override public DeviceTypeHash getPointer(long i) { - return new DeviceTypeHash((Pointer)this).offsetAddress(i); - } - - public native @Cast("std::size_t") @Name("operator ()") long apply(@ByVal TypeIdentifier x); - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DictComp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DictComp.java index 8e135720a1f..5592552f5d6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DictComp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DictComp.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,9 +22,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class DictComp extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public DictComp(Pointer p) { super(p); } - public DictComp(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public DictComp(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr key(); public native @ByVal Expr value(); public native @ByVal Expr target(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DictKeyEqualTo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DictKeyEqualTo.java deleted file mode 100644 index 5f7443d560c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DictKeyEqualTo.java +++ /dev/null @@ -1,38 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("c10::detail") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DictKeyEqualTo extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public DictKeyEqualTo() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public DictKeyEqualTo(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DictKeyEqualTo(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public DictKeyEqualTo position(long position) { - return (DictKeyEqualTo)super.position(position); - } - @Override public DictKeyEqualTo getPointer(long i) { - return new DictKeyEqualTo((Pointer)this).offsetAddress(i); - } - - public native @Cast("bool") @Name("operator ()") boolean apply(@Const @ByRef IValue lhs, @Const @ByRef IValue rhs); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DictKeyHash.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DictKeyHash.java deleted file mode 100644 index 5be4a8b1d56..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DictKeyHash.java +++ /dev/null @@ -1,38 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("c10::detail") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DictKeyHash extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public DictKeyHash() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
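The DictComp hunk above exposes the key(), value(), and target() accessors of a dict-comprehension node. A read-only sketch restricted to those members; the input node is assumed to come from the JIT parser.

    import org.bytedeco.pytorch.DictComp;
    import org.bytedeco.pytorch.Expr;

    public class DictCompSketch {
        // Pull apart "{key: value for target in ...}" using only the
        // accessors shown in this hunk.
        static void inspect(DictComp comp) {
            Expr key = comp.key();
            Expr value = comp.value();
            Expr target = comp.target();
            // inspect the three sub-expressions here
        }
    }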
*/ - public DictKeyHash(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DictKeyHash(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public DictKeyHash position(long position) { - return (DictKeyHash)super.position(position); - } - @Override public DictKeyHash getPointer(long i) { - return new DictKeyHash((Pointer)this).offsetAddress(i); - } - - public native @Cast("size_t") @Name("operator ()") long apply(@Const @ByRef IValue ivalue); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DictLiteral.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DictLiteral.java index 61aa23936f7..fbb5ddbf872 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DictLiteral.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DictLiteral.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,7 +21,15 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class DictLiteral extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DictLiteral(Pointer p) { super(p); } - public DictLiteral(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public DictLiteral(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal ExprList key_inputs(); + public native @ByVal ExprList value_inputs(); + public static native @ByVal DictLiteral create( + @Const @ByRef SourceRange range, + @Const @ByRef ExprList keys, + @Const @ByRef ExprList values); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DictType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DictType.java index 98965cd43ae..7c6d123085c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DictType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DictType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DifferentiableViewMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DifferentiableViewMeta.java index 24a71bb381c..e6e5b8d7994 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DifferentiableViewMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DifferentiableViewMeta.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package 
org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVector.java index 71d21b4887d..4413b17fb39 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,25 +17,8 @@ import static org.bytedeco.pytorch.global.torch.*; - -/** This is a 'vector' (really, a variable-sized array), optimized - * for the case when the array is small. It contains some number of elements - * in-place, which allows it to avoid heap allocation when the actual number of - * elements is below that threshold. This allows normal "small" cases to be - * fast without losing generality for large inputs. - * - * \note - * In the absence of a well-motivated choice for the number of inlined - * elements \p N, it is recommended to use \c SmallVector<T> (that is, - * omitting the \p N). This will choose a default number of inlined elements - * reasonable for allocation on the stack (for example, trying to keep \c - * sizeof(SmallVector<T>) around 64 bytes). - * - * \warning This does not attempt to be exception safe. - * - * @see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h */ @Name("c10::SmallVector<int64_t,at::kDimVectorStaticSize>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DimVector extends DimVectorImpl { +public class DimVector extends LongSmallVectorImpl { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public DimVector(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java index cd45c771938..1fee80e6d40 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorInferExpandGeometryResult.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -28,8 +30,8 @@ public class DimVectorInferExpandGeometryResult extends Pointer { public native @ByRef DimVector strides(); public native DimVectorInferExpandGeometryResult strides(DimVector setter); public DimVectorInferExpandGeometryResult(@Cast("size_t") long ndim) { super((Pointer)null); allocate(ndim); } private native void allocate(@Cast("size_t") long ndim); - public DimVectorInferExpandGeometryResult(@ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef sizes_, @Cast("size_t") long ndim) { super((Pointer)null); allocate(sizes_, ndim); } - private native void allocate(@ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef sizes_, @Cast("size_t") long ndim); + public DimVectorInferExpandGeometryResult(@ByVal LongArrayRef sizes_, @Cast("size_t") long ndim) { super((Pointer)null); allocate(sizes_, ndim); } + private native void allocate(@ByVal LongArrayRef sizes_, @Cast("size_t") long ndim); public DimVectorInferExpandGeometryResult(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes_, @Cast("size_t") long ndim) { super((Pointer)null); allocate(sizes_, ndim); } private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes_, @Cast("size_t") long ndim); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorOptional.java index 6fb0650814b..ec6c42c7db7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class DimVectorOptional extends Pointer { public native @Name("operator =") @ByRef DimVectorOptional put(@ByRef DimVectorOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef DimVector get(); @ValueSetter public native DimVectorOptional put(@ByRef DimVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dimname.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dimname.java index 2f0985a6bce..d0528903f73 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dimname.java +++
b/pytorch/src/gen/java/org/bytedeco/pytorch/Dimname.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java index 8efddf764c0..11347cb7689 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class DimnameArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public DimnameArrayRef(@Const @ByRef Dimname OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef Dimname OneElt); + /** Construct an ArrayRef from a pointer and length. */ public DimnameArrayRef(@Const Dimname data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -58,6 +59,8 @@ public class DimnameArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. + public DimnameArrayRef(@ByRef DimnameVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef DimnameVector vec); /** Construct an ArrayRef from a std::array */ @@ -70,13 +73,13 @@ public class DimnameArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Dimname begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Dimname end(); + public native @Const @ByPtr Dimname begin(); + public native @Const @ByPtr Dimname end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Dimname cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Dimname cend(); + public native @Const @ByPtr Dimname cbegin(); + public native @Const @ByPtr Dimname cend(); /** empty - Check if the array is empty. 
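Across the ArrayRef bindings in this patch, the implicit single-element constructor is dropped, a constructor taking the matching std::vector wrapper is generated, and begin()/end()/cbegin()/cend() now return plain @Const pointers instead of opaque iterator casts. A small sketch of the DimnameArrayRef case just above, assuming a DimnameVector filled in elsewhere; the front()/back() helpers used here are the Java-side additions shown in the DimnameVector hunk below, and the helper name is illustrative:

    import org.bytedeco.pytorch.*;

    public class DimnameViewSketch {
        static void describe(DimnameVector names) {
            // New in this patch: a non-owning ArrayRef view over the vector's storage.
            DimnameArrayRef view = new DimnameArrayRef(names);
            Dimname begin = view.begin();       // now a plain const pointer

            if (!names.empty()) {
                Dimname first = names.front();  // generated as get(0)
                Dimname last = names.back();    // generated as get(size() - 1)
            }
        }
    }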
*/ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameListOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameListOptional.java index 7511ddedce3..35ec2490e64 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameListOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameListOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class DimnameListOptional extends Pointer { public native @Name("operator =") @ByRef DimnameListOptional put(@ByRef DimnameListOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef DimnameArrayRef get(); @ValueSetter public native DimnameListOptional put(@ByRef DimnameArrayRef value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameOptional.java index 4a1fab1c8d3..2b6c66e8929 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class DimnameOptional extends Pointer { public native @Name("operator =") @ByRef DimnameOptional put(@ByRef DimnameOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Dimname get(); @ValueSetter public native DimnameOptional put(@ByRef Dimname value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameVector.java index d6b19a94c08..0a83e009713 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DimnameVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,8 @@ public class DimnameVector extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public Dimname front() { return get(0); } + public Dimname back() { return get(size() - 1); } @Index(function = "at") public native @ByRef Dimname get(@Cast("size_t") long i); public native @ByVal Iterator begin(); diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/DisablePythonDispatcher.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DisablePythonDispatcher.java new file mode 100644 index 00000000000..f1d17def9ca --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DisablePythonDispatcher.java @@ -0,0 +1,39 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("c10::impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DisablePythonDispatcher extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DisablePythonDispatcher(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public DisablePythonDispatcher(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public DisablePythonDispatcher position(long position) { + return (DisablePythonDispatcher)super.position(position); + } + @Override public DisablePythonDispatcher getPointer(long i) { + return new DisablePythonDispatcher((Pointer)this).offsetAddress(i); + } + + public DisablePythonDispatcher() { super((Pointer)null); allocate(); } + private native void allocate(); + public native PyInterpreter old_(); public native DisablePythonDispatcher old_(PyInterpreter setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DisableRecordFunctionGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DisableRecordFunctionGuard.java index 475af5f00e5..f1f74399c11 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DisableRecordFunctionGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DisableRecordFunctionGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DisabledStr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DisabledStr.java new file mode 100644 index 00000000000..496047ce6a6 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DisabledStr.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import 
static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// A SelectiveStr is like a const char*, except that it also comes +// with a type brand that says whether or not the name is enabled. +// If the string is disabled, then (at compile time) we DON'T generate +// a registration call for it. This class is not intended to be called +// directly; use TORCH_SELECTIVE_NAME or TORCH_SELECTIVE_SCHEMA macros below +// to create it. +@Name("torch::detail::SelectiveStr<false>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DisabledStr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DisabledStr(Pointer p) { super(p); } + + public DisabledStr(@Cast("const char*") BytePointer name) { super((Pointer)null); allocate(name); } + private native void allocate(@Cast("const char*") BytePointer name); + public DisabledStr(String name) { super((Pointer)null); allocate(name); } + private native void allocate(String name); + public native @Name("operator const char*") @Cast("const char*") BytePointer asBytePointer(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyExtractor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyExtractor.java index 6e08d52de63..c3e2e9d3f18 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyExtractor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyExtractor.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyOptional.java index 53458cc173a..96960bb3371 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeyOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class DispatchKeyOptional extends Pointer { public native @Name("operator =") @ByRef DispatchKeyOptional put(@ByRef DispatchKeyOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef DispatchKey get(); @ValueSetter public native DispatchKeyOptional put(@ByRef DispatchKey value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeySet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeySet.java index 06070a492e6..8ae801a4972 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeySet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchKeySet.java @@
-1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -238,8 +240,8 @@ public enum Raw { RAW(0); // Compute self ^ other public native @Const @ByVal @Name("operator ^") DispatchKeySet xor(@ByVal DispatchKeySet other); - - + public native @Cast("bool") @Name("operator ==") boolean equals(@ByVal DispatchKeySet other); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@ByVal DispatchKeySet other); // Add a DispatchKey to the DispatchKey set. Does NOT mutate, // returns the extended DispatchKeySet! public native @Const @ByVal DispatchKeySet add(DispatchKey t); @@ -392,8 +394,8 @@ private native void allocate( public native @ByVal @Cast("c10::DispatchKeySet::iterator::self_type*") @Name("operator ++") iterator increment(int arg0); - - + public native @Cast("bool") @Name("operator ==") boolean equals(@Cast("const c10::DispatchKeySet::iterator::self_type*") @ByRef iterator rhs); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Cast("const c10::DispatchKeySet::iterator::self_type*") @ByRef iterator rhs); public native @Name("operator *") DispatchKey multiply(); } // Returns iterator to the first key in the set. If no keys are in the diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchTraceNestingGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchTraceNestingGuard.java deleted file mode 100644 index c959da29c33..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DispatchTraceNestingGuard.java +++ /dev/null @@ -1,36 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DispatchTraceNestingGuard extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DispatchTraceNestingGuard(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
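The DispatchKeySet hunk above fills in the previously elided operator== and operator!= bindings, on both the set itself and its iterator, and the Optional wrappers throughout this patch (DispatchKeyOptional, DimVectorOptional, DimnameOptional, DoubleArrayRefOptional, ...) gain a reset() method. A short sketch, assuming DispatchKey.CPU is the enum constant exposed via org.bytedeco.pytorch.global.torch; the helper name is illustrative:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class DispatchKeySketch {
        static void demo(DispatchKeySet a, DispatchKeySet b, DispatchKeyOptional opt) {
            boolean same = a.equals(b);        // operator==, newly exposed
            boolean differ = a.notEquals(b);   // operator!=, newly exposed

            // add() returns an extended copy; the receiver is not mutated.
            DispatchKeySet extended = a.add(DispatchKey.CPU);

            if (opt.has_value()) {
                opt.reset();                   // newly generated: empties the optional
            }
        }
    }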
*/ - public DispatchTraceNestingGuard(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public DispatchTraceNestingGuard position(long position) { - return (DispatchTraceNestingGuard)super.position(position); - } - @Override public DispatchTraceNestingGuard getPointer(long i) { - return new DispatchTraceNestingGuard((Pointer)this).offsetAddress(i); - } - - public DispatchTraceNestingGuard() { super((Pointer)null); allocate(); } - private native void allocate(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java index 9c03d88d5cc..767c73458af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java index 247ae68330e..8e949167f17 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java index dd3cfbb2835..be7e77fc0e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSampler.java index d1e10043ac7..8d1941143a7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSampler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; 
import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java index 2cb79ef5240..5b6e3ae18b4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/any_of.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DontIncreaseRefcount.java similarity index 56% rename from pytorch/src/gen/java/org/bytedeco/pytorch/any_of.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/DontIncreaseRefcount.java index 65084bc5a0a..cc53b5e6941 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/any_of.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DontIncreaseRefcount.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,10 +18,11 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("torch") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class any_of extends Pointer { +// constructor tag used by intrusive_ptr constructors +@Namespace("c10::raw") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DontIncreaseRefcount extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public any_of() { super((Pointer)null); } + public DontIncreaseRefcount() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
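The DisablePythonDispatcher class added a few hunks above is a C++ RAII guard: constructing it disables the Python dispatcher and remembers the previous PyInterpreter in old_(), and its destructor restores that state. From Java the destructor has to be run explicitly; a minimal sketch using JavaCPP's standard Pointer.deallocate(), with an illustrative helper name:

    import org.bytedeco.pytorch.*;

    public class GuardSketch {
        static void withoutPythonDispatcher(Runnable body) {
            DisablePythonDispatcher guard = new DisablePythonDispatcher();
            try {
                body.run();             // ops here bypass the Python dispatcher
            } finally {
                guard.deallocate();     // run the native destructor deterministically
            }
        }
    }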
*/ - public any_of(Pointer p) { super(p); } + public DontIncreaseRefcount(Pointer p) { super(p); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dots.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dots.java index efbd5939da7..736f4314d4b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dots.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dots.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,8 +21,10 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Dots extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Dots(Pointer p) { super(p); } - public Dots(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Dots(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public static native @ByVal Dots create(@Const @ByRef SourceRange range); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRef.java index 5f8124cf466..e1f5b5211f8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,6 +22,15 @@ public class DoubleArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public DoubleArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public DoubleArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public DoubleArrayRef position(long position) { + return (DoubleArrayRef)super.position(position); + } + @Override public DoubleArrayRef getPointer(long i) { + return new DoubleArrayRef((Pointer)this).offsetAddress(i); + } /** \name Constructors * \{ @@ -30,8 +41,7 @@ public class DoubleArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public DoubleArrayRef(double OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(double OneElt); + /** Construct an ArrayRef from a pointer and length. 
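For DoubleArrayRef the same ArrayRef cleanup applies: the single-element constructor is removed, the pointer+length and range constructors stay, a DoubleVector overload is added further down, and begin()/end() return plain @Const DoublePointer handles. A runnable sketch of the pointer+length form, using only methods visible in these hunks:

    import org.bytedeco.javacpp.DoublePointer;
    import org.bytedeco.pytorch.*;

    public class DoubleViewSketch {
        public static void main(String[] args) {
            DoublePointer data = new DoublePointer(1.0, 2.0, 3.0);
            DoubleArrayRef ref = new DoubleArrayRef(data, 3);  // non-owning view

            DoublePointer it = ref.begin();  // plain const pointer after this patch
            double sum = 0;
            for (long i = 0; i < ref.size(); i++) {
                sum += it.get(i);
            }
            System.out.println(sum);         // 6.0
        }
    }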
*/ public DoubleArrayRef(@Const DoublePointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -57,6 +67,8 @@ public class DoubleArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. + public DoubleArrayRef(@ByRef DoubleVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef DoubleVector vec); /** Construct an ArrayRef from a std::array */ @@ -69,13 +81,13 @@ public class DoubleArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") DoublePointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") DoublePointer end(); + public native @Const DoublePointer begin(); + public native @Const DoublePointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") DoublePointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") DoublePointer cend(); + public native @Const DoublePointer cbegin(); + public native @Const DoublePointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRefOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRefOptional.java index 02a9c51b461..0a61d9c1085 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRefOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleArrayRefOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class DoubleArrayRefOptional extends Pointer { public native @Name("operator =") @ByRef DoubleArrayRefOptional put(@ByRef DoubleArrayRefOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef DoubleArrayRef get(); @ValueSetter public native DoubleArrayRefOptional put(@ByRef DoubleArrayRef value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplex.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplex.java new file mode 100644 index 00000000000..8c74eb5a0e1 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplex.java @@ -0,0 +1,197 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// c10::complex is an implementation of complex numbers that aims +// 
to work on all devices supported by PyTorch +// +// Most of the APIs duplicate std::complex +// Reference: https://en.cppreference.com/w/cpp/numeric/complex +// +// [NOTE: Complex Operator Unification] +// Operators currently use a mix of std::complex, thrust::complex, and +// c10::complex internally. The end state is that all operators will use +// c10::complex internally. Until then, there may be some hacks to support all +// variants. +// +// +// [Note on Constructors] +// +// The APIs of constructors are mostly copied from C++ standard: +// https://en.cppreference.com/w/cpp/numeric/complex/complex +// +// Since C++14, all constructors are constexpr in std::complex +// +// There are three types of constructors: +// - initializing from real and imag: +// `constexpr complex( const T& re = T(), const T& im = T() );` +// - implicitly-declared copy constructor +// - converting constructors +// +// Converting constructors: +// - std::complex defines converting constructor between float/double/long +// double, +// while we define converting constructor between float/double. +// - For these converting constructors, upcasting is implicit, downcasting is +// explicit. +// - We also define explicit casting from std::complex/thrust::complex +// - Note that the conversion from thrust is not constexpr, because +// thrust does not define them as constexpr ???? +// +// +// [Operator =] +// +// The APIs of operator = are mostly copied from C++ standard: +// https://en.cppreference.com/w/cpp/numeric/complex/operator%3D +// +// Since C++20, all operator= are constexpr. Although we are not building with +// C++20, we also obey this behavior. +// +// There are three types of assignment operators: +// - Assign a real value from the same scalar type +// - In std, this is templated as complex& operator=(const T& x) +// with specialization `complex& operator=(T x)` for float/double/long +// double. Since we only support float and double, one will use `complex& +// operator=(T x)` +// - Copy assignment operator and converting assignment operator +// - There is no specialization of converting assignment operators; which type +// is convertible depends solely on whether the scalar type is convertible +// +// In addition to the standard assignment, we also provide assignment operators +// with std and thrust +// +// +// [Casting operators] +// +// std::complex does not have casting operators. We define casting operators +// casting to std::complex and thrust::complex +// +// +// [Operator ""] +// +// std::complex has custom literals `i`, `if` and `il` defined in namespace +// `std::literals::complex_literals`. We define our own custom literals in the +// namespace `c10::complex_literals`. Our custom literals do not follow the +// same behavior as in std::complex; instead, we define _if, _id to construct +// float/double complex literals. +// +// +// [real() and imag()] +// +// In C++20, there are two overloads of these functions, one is to return the +// real/imag, another is to set real/imag; they are both constexpr. We follow +// this design. +// +// +// [Operator +=,-=,*=,/=] +// +// Since C++20, these operators become constexpr. In our implementation, they +// are also constexpr. +// +// There are two types of such operators: operating with a real number, or +// operating with another complex number. For the operating with a real number, +// the generic template form has argument type `const T &`, while the overload +// for float/double/long double has `T`.
We will follow the same type as +// float/double/long double in std. +// +// [Unary operator +-] +// +// Since C++20, they are constexpr. We also make them constexpr +// +// [Binary operators +-*/] +// +// Each operator has three versions (taking + as example): +// - complex + complex +// - complex + real +// - real + complex +// +// [Operator ==, !=] +// +// Each operator has three versions (taking == as example): +// - complex == complex +// - complex == real +// - real == complex +// +// Some of them are removed in C++20, but we decided to keep them +// +// [Operator <<, >>] +// +// These are implemented by casting to std::complex +// +// +// +// TODO(@zasdfgbnm): c10::complex<c10::Half> is not currently supported, +// because: +// - lots of members and functions of c10::Half are not constexpr +// - thrust::complex<c10::Half> only supports float and double + +@Name("c10::complex<double>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DoubleComplex extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DoubleComplex(Pointer p) { super(p); } + + + public native double real_(); public native DoubleComplex real_(double setter); + public native double imag_(); public native DoubleComplex imag_(double setter); + + public DoubleComplex() { super((Pointer)null); allocate(); } + private native void allocate(); + public DoubleComplex(double re, double im/*=double()*/) { super((Pointer)null); allocate(re, im); } + private native void allocate(double re, double im/*=double()*/); + public DoubleComplex(double re) { super((Pointer)null); allocate(re); } + private native void allocate(double re); +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif + + // Use SFINAE to specialize casting constructor for c10::complex<float> and + // c10::complex<double> + + public native @Const @ByRef @Name("operator =") DoubleComplex put(double re); + + public native @Const @ByRef @Name("operator +=") DoubleComplex addPut(double re); + + public native @Const @ByRef @Name("operator -=") DoubleComplex subtractPut(double re); + + public native @Const @ByRef @Name("operator *=") DoubleComplex multiplyPut(double re); + + public native @Const @ByRef @Name("operator /=") DoubleComplex dividePut(double re); + +// #ifdef __APPLE__ +// #define FORCE_INLINE_APPLE __attribute__((always_inline)) +// #else +// #define FORCE_INLINE_APPLE +// #endif +// #undef FORCE_INLINE_APPLE + +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif + +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif + + // consistent with NumPy behavior + public native @Cast("bool") @Name("operator bool") boolean asBoolean(); + + public native @org.bytedeco.javacpp.annotation.Function double real(); + public native @org.bytedeco.javacpp.annotation.Function void real(double value); + public native @org.bytedeco.javacpp.annotation.Function double imag(); + public native @org.bytedeco.javacpp.annotation.Function void imag(double value); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexrrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexArrayRef.java similarity index 59% rename from pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexrrayRef.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexArrayRef.java index 853818eb4ea..4b8421090ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexrrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexArrayRef.java @@ -1,10 +1,12 @@ -// 
Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,33 +18,38 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("c10::ArrayRef<c10::complex<double> >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DoubleComplexrrayRef extends Pointer { +public class DoubleComplexArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DoubleComplexrrayRef(Pointer p) { super(p); } + public DoubleComplexArrayRef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public DoubleComplexrrayRef(long size) { super((Pointer)null); allocateArray(size); } + public DoubleComplexArrayRef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); - @Override public DoubleComplexrrayRef position(long position) { - return (DoubleComplexrrayRef)super.position(position); + @Override public DoubleComplexArrayRef position(long position) { + return (DoubleComplexArrayRef)super.position(position); } - @Override public DoubleComplexrrayRef getPointer(long i) { - return new DoubleComplexrrayRef((Pointer)this).offsetAddress(i); + @Override public DoubleComplexArrayRef getPointer(long i) { + return new DoubleComplexArrayRef((Pointer)this).offsetAddress(i); } /** \name Constructors * \{

* Construct an empty ArrayRef. */ - /* implicit */ public DoubleComplexrrayRef() { super((Pointer)null); allocate(); } + /* implicit */ public DoubleComplexArrayRef() { super((Pointer)null); allocate(); } private native void allocate(); /** Construct an ArrayRef from a single element. */ // TODO Make this explicit + /** Construct an ArrayRef from a pointer and length. */ + public DoubleComplexArrayRef(@Const DoubleComplex data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Const DoubleComplex data, @Cast("size_t") long length); /** Construct an ArrayRef from a range. */ + public DoubleComplexArrayRef(@Const DoubleComplex begin, @Const DoubleComplex end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Const DoubleComplex begin, @Const DoubleComplex end); /** Construct an ArrayRef from a SmallVector. This is templated in order to * avoid instantiating SmallVectorTemplateCommon whenever we @@ -64,38 +71,46 @@ public class DoubleComplexrrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef<c10::complex<double> >::iterator*") DoublePointer begin(); - public native @ByVal @Cast("const c10::ArrayRef<c10::complex<double> >::iterator*") DoublePointer end(); + public native @Const @ByPtr DoubleComplex begin(); + public native @Const @ByPtr DoubleComplex end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef<c10::complex<double> >::const_iterator*") DoublePointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef<c10::complex<double> >::const_iterator*") DoublePointer cend(); + public native @Const @ByPtr DoubleComplex cbegin(); + public native @Const @ByPtr DoubleComplex cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); + public native @Const DoubleComplex data(); + /** size - Get the array size. */ public native @Cast("const size_t") long size(); /** front - Get the first element. */ + public native @Const @ByRef DoubleComplex front(); /** back - Get the last element. */ + public native @Const @ByRef DoubleComplex back(); /** equals - Check for element-wise equality. */ - public native @Cast("const bool") boolean equals(@ByVal DoubleComplexrrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal DoubleComplexArrayRef RHS); /** slice(n, m) - Take M elements of the array starting at element N */ - public native @Const @ByVal DoubleComplexrrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + public native @Const @ByVal DoubleComplexArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array. */ - public native @Const @ByVal DoubleComplexrrayRef slice(@Cast("size_t") long N); + public native @Const @ByVal DoubleComplexArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads * \{ */ + public native @Const @ByRef @Name("operator []") DoubleComplex get(@Cast("size_t") long Index); /** Vector compatibility */ + + /// + public native @Const @ByRef DoubleComplex at(@Cast("size_t") long Index); /** Disallow accidental assignment from a temporary.
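With the DoubleComplex binding above (c10::complex<double>) and the pointer+length constructor of DoubleComplexArrayRef, complex scalars can be created and viewed directly from Java. A small sketch using only methods visible in these hunks:

    import org.bytedeco.pytorch.*;

    public class ComplexSketch {
        public static void main(String[] args) {
            DoubleComplex z = new DoubleComplex(1.0, 2.0);  // 1 + 2i
            z.addPut(3.0);                                  // z += 3.0, now 4 + 2i
            z.imag(-2.0);                                   // setter overload, now 4 - 2i
            System.out.println(z.real() + " " + z.imag());  // 4.0 -2.0

            // Non-owning view over one element (pointer + length constructor).
            DoubleComplexArrayRef view = new DoubleComplexArrayRef(z, 1);
            System.out.println(view.front().real());        // 4.0
        }
    }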
* @@ -112,6 +127,7 @@ public class DoubleComplexrrayRef extends Pointer { /** \} * \name Expensive Operations * \{ */ + public native @StdVector DoubleComplex vec(); /** \} */ } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexElementReference.java new file mode 100644 index 00000000000..3a3de8a5159 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexElementReference.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::impl::ListElementReference<c10::complex<double>,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DoubleComplexElementReference extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DoubleComplexElementReference(Pointer p) { super(p); } + + public native @Name("operator std::conditional_t<std::is_reference<typename c10::detail::ivalue_to_const_ref_overload_return<c10::complex<double> >::type>::value,const c10::complex<double>&,c10::complex<double> >") @ByVal DoubleComplex getDoubleComplex(); + + + + + + // assigning another ref to this assigns the underlying value + + + public native @Const @ByRef IValue get(); + + + + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexList.java new file mode 100644 index 00000000000..fe13f5459c8 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexList.java @@ -0,0 +1,256 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * An object of this class stores a list of values of type T. + * + * This is a pointer type. After a copy, both Lists + * will share the same storage: + * + * > List<string> a; + * > List<string> b = a; + * > b.push_back("three"); + * > ASSERT("three" == a.get(0)); + * + * We use this class in the PyTorch kernel API instead of + * std::vector<T>, because that allows us to do optimizations + * and switch out the underlying list implementation without + * breaking backwards compatibility for the kernel API. + */ +@Name("c10::List<c10::complex<double> >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DoubleComplexList extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ + public DoubleComplexList(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public DoubleComplexList(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public DoubleComplexList position(long position) { + return (DoubleComplexList)super.position(position); + } + @Override public DoubleComplexList getPointer(long i) { + return new DoubleComplexList((Pointer)this).offsetAddress(i); + } + + + /** + * Constructs an empty list. + */ + public DoubleComplexList() { super((Pointer)null); allocate(); } + private native void allocate(); + + /** + * Constructs a list with some initial values. + * Example: + * List<int> a({2, 3, 4}); + */ + public DoubleComplexList(@ByVal DoubleComplexArrayRef initial_values) { super((Pointer)null); allocate(initial_values); } + private native void allocate(@ByVal DoubleComplexArrayRef initial_values); + + /** + * Create a generic list with runtime type information. + * This only works for c10::impl::GenericList and is not part of the public API + * but only supposed to be used internally by PyTorch. + */ + + + public DoubleComplexList(@Const @ByRef DoubleComplexList arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef DoubleComplexList arg0); + public native @ByRef @Name("operator =") DoubleComplexList put(@Const @ByRef DoubleComplexList arg0); + + /** + * Create a new List pointing to a deep copy of the same data. + * The List returned is a new list with separate storage. + * Changes in it are not reflected in the original list or vice versa. + */ + public native @ByVal DoubleComplexList copy(); + + /** + * Returns the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + */ + public native @ByVal DoubleComplex get(long pos); + + /** + * Moves out the element at the specified location pos and returns it, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * The list contains an invalid element at position pos afterwards. Any operations + * on it before re-setting it are invalid. + */ + public native @ByVal DoubleComplex extract(long pos); + + /** + * Returns a reference to the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * + * You cannot store the reference, but you can read it and assign new values to it: + * + * List<int64_t> list = ...; + * list[2] = 5; + * int64_t v = list[1]; + */ + + + + + /** + * Assigns a new value to the element at location pos. + */ + public native void set(long pos, @ByVal DoubleComplex value); + + /** + * Assigns a new value to the element at location pos. + */ + + /** + * Returns an iterator to the first element of the container. + * If the container is empty, the returned iterator will be equal to end(). + */ + public native @ByVal @Cast("c10::List<c10::complex<double> >::iterator*") DoubleComplexListIterator begin(); + + /** + * Returns an iterator to the element following the last element of the container. + * This element acts as a placeholder; attempting to access it results in undefined behavior. + */ + public native @ByVal @Cast("c10::List<c10::complex<double> >::iterator*") DoubleComplexListIterator end(); + + /** + * Checks if the container has no elements.
+ */ + public native @Cast("bool") boolean empty(); + + /** + * Returns the number of elements in the container + */ + public native long size(); + + /** + * Increase the capacity of the vector to a value that's greater or equal to new_cap. + */ + public native void reserve(long new_cap); + + /** + * Erases all elements from the container. After this call, size() returns zero. + * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated. + */ + public native void clear(); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List<c10::complex<double> >::iterator*") DoubleComplexListIterator insert(@ByVal @Cast("c10::List<c10::complex<double> >::iterator*") DoubleComplexListIterator pos, @Const @ByRef DoubleComplex value); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Inserts a new element into the container directly before pos. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void push_back(@Const @ByRef DoubleComplex value); + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given list to the end of the container. Uses at most one memory allocation. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void append(@ByVal DoubleComplexList lst); + + /** + * Appends the given element value to the end of the container. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Removes the element at pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List<c10::complex<double> >::iterator*") DoubleComplexListIterator erase(@ByVal @Cast("c10::List<c10::complex<double> >::iterator*") DoubleComplexListIterator pos); + + /** + * Removes the elements in the range [first, last). + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List<c10::complex<double> >::iterator*") DoubleComplexListIterator erase(@ByVal @Cast("c10::List<c10::complex<double> >::iterator*") DoubleComplexListIterator first, @ByVal @Cast("c10::List<c10::complex<double> >::iterator*") DoubleComplexListIterator last); + + /** + * Removes the last element of the container. + * Calling pop_back on an empty container is undefined. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
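The class javadoc above stresses that c10::List is a pointer type: the copy constructor shares storage, while copy() makes a detached deep copy. A sketch of what that looks like through the generated DoubleComplexList, using only methods visible in this file (is() and pop_back() appear in the continuation of the class just below):

    import org.bytedeco.pytorch.*;

    public class ComplexListSketch {
        public static void main(String[] args) {
            DoubleComplexList a = new DoubleComplexList();
            a.push_back(new DoubleComplex(1.0, -1.0));

            DoubleComplexList b = new DoubleComplexList(a); // copy ctor: shares storage
            b.push_back(new DoubleComplex(2.0, 0.0));
            System.out.println(a.size());                   // 2: a sees b's push_back
            System.out.println(a.is(b));                    // true: same identity

            DoubleComplexList c = a.copy();                 // detached deep copy
            c.pop_back();
            System.out.println(a.size() + " " + c.size());  // 2 1
        }
    }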
+   */
+  public native void pop_back();
+
+  /**
+   * Resizes the container to contain count elements.
+   * If the current size is less than count, additional default-inserted elements are appended.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+  public native void resize(long count);
+
+  /**
+   * Resizes the container to contain count elements.
+   * If the current size is less than count, additional copies of value are appended.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+  public native void resize(long count, @Const @ByRef DoubleComplex value);
+
+  /**
+   * Value equality comparison. This function implements Python-like semantics for
+   * equality: two lists with the same identity (e.g. same pointer) trivially
+   * compare equal, otherwise each element is compared for equality.
+   */
+
+
+
+
+  /**
+   * Identity comparison. Returns true if and only if {@code rhs} represents the same
+   * List object as {@code this}.
+   */
+  public native @Cast("bool") boolean is(@Const @ByRef DoubleComplexList rhs);
+
+  public native @StdVector DoubleComplex vec();
+
+  /**
+   * Returns the number of Lists currently pointing to this same list.
+   * If this is the only instance pointing to this list, returns 1.
+   */
+  // TODO Test use_count
+  public native @Cast("size_t") long use_count();
+
+  public native @ByVal Type.TypePtr elementType();
+
+  // See [unsafe set type] for why this exists.
+  public native void unsafeSetElementType(@ByVal Type.TypePtr t);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexListIterator.java
new file mode 100644
index 00000000000..e04e13175a5
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleComplexListIterator.java
@@ -0,0 +1,87 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+// this wraps vector::iterator to make sure user code can't rely
+// on it being the type of the underlying vector.
+@Name("c10::impl::ListIterator<c10::complex<double>,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class DoubleComplexListIterator extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public DoubleComplexListIterator(Pointer p) { super(p); }
+    /** Native array allocator. Access with {@link Pointer#position(long)}. */
+    public DoubleComplexListIterator(long size) { super((Pointer)null); allocateArray(size); }
+    private native void allocateArray(long size);
+    @Override public DoubleComplexListIterator position(long position) {
+        return (DoubleComplexListIterator)super.position(position);
+    }
+    @Override public DoubleComplexListIterator getPointer(long i) {
+        return new DoubleComplexListIterator((Pointer)this).offsetAddress(i);
+    }
+
+  // C++17 friendly std::iterator implementation
+
+  public DoubleComplexListIterator() { super((Pointer)null); allocate(); }
+  private native void allocate();
+
+  public DoubleComplexListIterator(@Const @ByRef DoubleComplexListIterator arg0) { super((Pointer)null); allocate(arg0); }
+  private native void allocate(@Const @ByRef DoubleComplexListIterator arg0);
+  public native @ByRef @Name("operator =") DoubleComplexListIterator put(@Const @ByRef DoubleComplexListIterator arg0);
+
+  public native @ByRef @Name("operator ++") DoubleComplexListIterator increment();
+
+  public native @ByVal @Name("operator ++") DoubleComplexListIterator increment(int arg0);
+
+  public native @ByRef @Name("operator --") DoubleComplexListIterator decrement();
+
+  public native @ByVal @Name("operator --") DoubleComplexListIterator decrement(int arg0);
+
+  public native @ByRef @Name("operator +=") DoubleComplexListIterator addPut(long offset);
+
+  public native @ByRef @Name("operator -=") DoubleComplexListIterator subtractPut(long offset);
+
+  public native @ByVal @Name("operator +") DoubleComplexListIterator add(long offset);
+
+  public native @ByVal @Name("operator -") DoubleComplexListIterator subtract(long offset);
+
+  private static native @Namespace @Cast("c10::impl::ListIterator<c10::complex<double>,c10::detail::ListImpl::list_type::iterator>::difference_type") @Name("operator -") long subtract(@Const @ByRef DoubleComplexListIterator lhs, @Const @ByRef DoubleComplexListIterator rhs);
+  public long subtract(DoubleComplexListIterator rhs) { return subtract(this, rhs); }
+
+
+
+
+
+  private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef DoubleComplexListIterator lhs, @Const @ByRef DoubleComplexListIterator rhs);
+  public boolean equals(DoubleComplexListIterator rhs) { return equals(this, rhs); }
+
+  private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef DoubleComplexListIterator lhs, @Const @ByRef DoubleComplexListIterator rhs);
+  public boolean notEquals(DoubleComplexListIterator rhs) { return notEquals(this, rhs); }
+
+  private static native @Namespace @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef DoubleComplexListIterator lhs, @Const @ByRef DoubleComplexListIterator rhs);
+  public boolean lessThan(DoubleComplexListIterator rhs) { return lessThan(this, rhs); }
+
+  private static native @Namespace @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef DoubleComplexListIterator lhs, @Const @ByRef DoubleComplexListIterator rhs);
+  public boolean lessThanEquals(DoubleComplexListIterator rhs) { return lessThanEquals(this, rhs); }
+
+  private static native @Namespace @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef DoubleComplexListIterator lhs, @Const @ByRef DoubleComplexListIterator rhs);
+  public boolean greaterThan(DoubleComplexListIterator rhs) { return greaterThan(this, rhs); }
+
+  private static native @Namespace @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef DoubleComplexListIterator lhs, @Const @ByRef DoubleComplexListIterator rhs);
+  public boolean greaterThanEquals(DoubleComplexListIterator rhs) { return greaterThanEquals(this, rhs); }
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleElementReference.java
new file mode 100644
index 00000000000..e5ca9af5e16
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleElementReference.java
@@ -0,0 +1,42 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Name("c10::impl::ListElementReference<double,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class DoubleElementReference extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public DoubleElementReference(Pointer p) { super(p); }
+
+  public native @Name("operator std::conditional_t<std::is_reference<typename c10::detail::ivalue_to_const_ref_overload_return<double>::type>::value,const double&,double>") double getDouble();
+
+
+
+
+
+  // assigning another ref to this assigns the underlying value
+
+
+  public native @Const @ByRef IValue get();
+
+
+
+
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleExpandingArrayOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleExpandingArrayOptional.java
index 50ef2cfa655..cf174e7aef4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleExpandingArrayOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleExpandingArrayOptional.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -26,6 +28,7 @@ public class DoubleExpandingArrayOptional extends Pointer {
     public native @Name("operator =") @ByRef DoubleExpandingArrayOptional put(@ByRef DoubleExpandingArrayOptional x);
 
     public native boolean has_value();
+    public native void reset();
     public native @Name("value") @Cast("torch::ExpandingArray<1,double>*") @ByRef DoublePointer get();
     @ValueSetter public native DoubleExpandingArrayOptional put(@Cast("torch::ExpandingArray<1,double>*") @ByRef DoublePointer value);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleList.java
new file mode 100644
index 00000000000..20d60ed38ff
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleList.java
@@ -0,0 +1,239 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
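+// A minimal usage sketch (hypothetical, not part of the generated file), assuming
+// the pytorch natives are loadable; it only uses methods declared below:
+//   DoubleList list = new DoubleList();
+//   list.push_back(1.0);
+//   list.push_back(2.0);
+//   double first = list.get(0); // bounds-checked read, returns 1.0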
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("c10::List<double>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class DoubleList extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public DoubleList(Pointer p) { super(p); }
+    /** Native array allocator. Access with {@link Pointer#position(long)}. */
+    public DoubleList(long size) { super((Pointer)null); allocateArray(size); }
+    private native void allocateArray(long size);
+    @Override public DoubleList position(long position) {
+        return (DoubleList)super.position(position);
+    }
+    @Override public DoubleList getPointer(long i) {
+        return new DoubleList((Pointer)this).offsetAddress(i);
+    }
+
+
+  /**
+   * Constructs an empty list.
+   */
+  public DoubleList() { super((Pointer)null); allocate(); }
+  private native void allocate();
+
+  /**
+   * Constructs a list with some initial values.
+   * Example:
+   *   List<int> a({2, 3, 4});
+   */
+  public DoubleList(@ByVal DoubleArrayRef initial_values) { super((Pointer)null); allocate(initial_values); }
+  private native void allocate(@ByVal DoubleArrayRef initial_values);
+
+  /**
+   * Create a generic list with runtime type information.
+   * This only works for c10::impl::GenericList and is not part of the public API
+   * but only supposed to be used internally by PyTorch.
+   */
+
+
+  public DoubleList(@Const @ByRef DoubleList arg0) { super((Pointer)null); allocate(arg0); }
+  private native void allocate(@Const @ByRef DoubleList arg0);
+  public native @ByRef @Name("operator =") DoubleList put(@Const @ByRef DoubleList arg0);
+
+  /**
+   * Create a new List pointing to a deep copy of the same data.
+   * The List returned is a new list with separate storage.
+   * Changes in it are not reflected in the original list or vice versa.
+   */
+  public native @ByVal DoubleList copy();
+
+  /**
+   * Returns the element at specified location pos, with bounds checking.
+   * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+   */
+  public native double get(long pos);
+
+  /**
+   * Moves out the element at the specified location pos and returns it, with bounds checking.
+   * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+   * The list contains an invalid element at position pos afterwards. Any operations
+   * on it before re-setting it are invalid.
+   */
+  public native double extract(long pos);
+
+  /**
+   * Returns a reference to the element at specified location pos, with bounds checking.
+   * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+   *
+   * You cannot store the reference, but you can read it and assign new values to it:
+   *
+   *   List<int> list = ...;
+   *   list[2] = 5;
+   *   int64_t v = list[1];
+   */
+
+
+
+
+  /**
+   * Assigns a new value to the element at location pos.
+   */
+  public native void set(long pos, double value);
+
+  /**
+   * Assigns a new value to the element at location pos.
+   */
+
+  /**
+   * Returns an iterator to the first element of the container.
+   * If the container is empty, the returned iterator will be equal to end().
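+   * A hypothetical iteration sketch (the iterator methods are defined in
+   * DoubleListIterator):
+   *   for (DoubleListIterator it = list.begin(); it.notEquals(list.end()); it.increment()) {
+   *       // advance through the list
+   *   }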
+   */
+  public native @ByVal @Cast("c10::List<double>::iterator*") DoubleListIterator begin();
+
+  /**
+   * Returns an iterator to the element following the last element of the container.
+   * This element acts as a placeholder; attempting to access it results in undefined behavior.
+   */
+  public native @ByVal @Cast("c10::List<double>::iterator*") DoubleListIterator end();
+
+  /**
+   * Checks if the container has no elements.
+   */
+  public native @Cast("bool") boolean empty();
+
+  /**
+   * Returns the number of elements in the container
+   */
+  public native long size();
+
+  /**
+   * Increase the capacity of the vector to a value that's greater or equal to new_cap.
+   */
+  public native void reserve(long new_cap);
+
+  /**
+   * Erases all elements from the container. After this call, size() returns zero.
+   * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated.
+   */
+  public native void clear();
+
+  /**
+   * Inserts value before pos.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+  public native @ByVal @Cast("c10::List<double>::iterator*") DoubleListIterator insert(@ByVal @Cast("c10::List<double>::iterator*") DoubleListIterator pos, double value);
+
+  /**
+   * Inserts value before pos.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+
+  /**
+   * Inserts a new element into the container directly before pos.
+   * The new element is constructed with the given arguments.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+
+  /**
+   * Appends the given element value to the end of the container.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+  public native void push_back(double value);
+
+  /**
+   * Appends the given element value to the end of the container.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+
+  /**
+   * Appends the given list to the end of the container. Uses at most one memory allocation.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+  public native void append(@ByVal DoubleList lst);
+
+  /**
+   * Appends the given element value to the end of the container.
+   * The new element is constructed with the given arguments.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+
+  /**
+   * Removes the element at pos.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+  public native @ByVal @Cast("c10::List<double>::iterator*") DoubleListIterator erase(@ByVal @Cast("c10::List<double>::iterator*") DoubleListIterator pos);
+
+  /**
+   * Removes the elements in the range [first, last).
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
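+   * For example (a hypothetical sketch), dropping everything after the first
+   * element:
+   *   list.erase(list.begin().add(1), list.end());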
+   */
+  public native @ByVal @Cast("c10::List<double>::iterator*") DoubleListIterator erase(@ByVal @Cast("c10::List<double>::iterator*") DoubleListIterator first, @ByVal @Cast("c10::List<double>::iterator*") DoubleListIterator last);
+
+  /**
+   * Removes the last element of the container.
+   * Calling pop_back on an empty container is undefined.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+  public native void pop_back();
+
+  /**
+   * Resizes the container to contain count elements.
+   * If the current size is less than count, additional default-inserted elements are appended.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+  public native void resize(long count);
+
+  /**
+   * Resizes the container to contain count elements.
+   * If the current size is less than count, additional copies of value are appended.
+   * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+   */
+  public native void resize(long count, double value);
+
+  /**
+   * Value equality comparison. This function implements Python-like semantics for
+   * equality: two lists with the same identity (e.g. same pointer) trivially
+   * compare equal, otherwise each element is compared for equality.
+   */
+
+
+
+
+  /**
+   * Identity comparison. Returns true if and only if {@code rhs} represents the same
+   * List object as {@code this}.
+   */
+  public native @Cast("bool") boolean is(@Const @ByRef DoubleList rhs);
+
+  public native @ByVal @Cast("std::vector<double>*") DoubleVector vec();
+
+  /**
+   * Returns the number of Lists currently pointing to this same list.
+   * If this is the only instance pointing to this list, returns 1.
+   */
+  // TODO Test use_count
+  public native @Cast("size_t") long use_count();
+
+  public native @ByVal Type.TypePtr elementType();
+
+  // See [unsafe set type] for why this exists.
+  public native void unsafeSetElementType(@ByVal Type.TypePtr t);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleListIterator.java
new file mode 100644
index 00000000000..84a9241e2a1
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleListIterator.java
@@ -0,0 +1,84 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("c10::impl::ListIterator<double,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class DoubleListIterator extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public DoubleListIterator(Pointer p) { super(p); }
+    /** Native array allocator. Access with {@link Pointer#position(long)}.
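+     * For example (hypothetical): new DoubleListIterator(3).position(1) addresses
+     * the second iterator in the natively allocated array.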
+     */
+    public DoubleListIterator(long size) { super((Pointer)null); allocateArray(size); }
+    private native void allocateArray(long size);
+    @Override public DoubleListIterator position(long position) {
+        return (DoubleListIterator)super.position(position);
+    }
+    @Override public DoubleListIterator getPointer(long i) {
+        return new DoubleListIterator((Pointer)this).offsetAddress(i);
+    }
+
+  // C++17 friendly std::iterator implementation
+
+  public DoubleListIterator() { super((Pointer)null); allocate(); }
+  private native void allocate();
+
+  public DoubleListIterator(@Const @ByRef DoubleListIterator arg0) { super((Pointer)null); allocate(arg0); }
+  private native void allocate(@Const @ByRef DoubleListIterator arg0);
+  public native @ByRef @Name("operator =") DoubleListIterator put(@Const @ByRef DoubleListIterator arg0);
+
+  public native @ByRef @Name("operator ++") DoubleListIterator increment();
+
+  public native @ByVal @Name("operator ++") DoubleListIterator increment(int arg0);
+
+  public native @ByRef @Name("operator --") DoubleListIterator decrement();
+
+  public native @ByVal @Name("operator --") DoubleListIterator decrement(int arg0);
+
+  public native @ByRef @Name("operator +=") DoubleListIterator addPut(long offset);
+
+  public native @ByRef @Name("operator -=") DoubleListIterator subtractPut(long offset);
+
+  public native @ByVal @Name("operator +") DoubleListIterator add(long offset);
+
+  public native @ByVal @Name("operator -") DoubleListIterator subtract(long offset);
+
+  private static native @Namespace @Cast("c10::impl::ListIterator<double,c10::detail::ListImpl::list_type::iterator>::difference_type") @Name("operator -") long subtract(@Const @ByRef DoubleListIterator lhs, @Const @ByRef DoubleListIterator rhs);
+  public long subtract(DoubleListIterator rhs) { return subtract(this, rhs); }
+
+
+
+
+
+  private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef DoubleListIterator lhs, @Const @ByRef DoubleListIterator rhs);
+  public boolean equals(DoubleListIterator rhs) { return equals(this, rhs); }
+
+  private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef DoubleListIterator lhs, @Const @ByRef DoubleListIterator rhs);
+  public boolean notEquals(DoubleListIterator rhs) { return notEquals(this, rhs); }
+
+  private static native @Namespace @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef DoubleListIterator lhs, @Const @ByRef DoubleListIterator rhs);
+  public boolean lessThan(DoubleListIterator rhs) { return lessThan(this, rhs); }
+
+  private static native @Namespace @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef DoubleListIterator lhs, @Const @ByRef DoubleListIterator rhs);
+  public boolean lessThanEquals(DoubleListIterator rhs) { return lessThanEquals(this, rhs); }
+
+  private static native @Namespace @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef DoubleListIterator lhs, @Const @ByRef DoubleListIterator rhs);
+  public boolean greaterThan(DoubleListIterator rhs) { return greaterThan(this, rhs); }
+
+  private static native @Namespace @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef DoubleListIterator lhs, @Const @ByRef DoubleListIterator rhs);
+  public boolean greaterThanEquals(DoubleListIterator rhs) { return greaterThanEquals(this, rhs); }
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleOptional.java
index 9fb87dbff45..07d0f7d75bb 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleOptional.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -26,6 +28,7 @@ public class DoubleOptional extends Pointer {
     public native @Name("operator =") @ByRef DoubleOptional put(@ByRef DoubleOptional x);
 
     public native boolean has_value();
+    public native void reset();
     public native @Name("value") double get();
     @ValueSetter public native DoubleOptional put(double value);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVector.java
index d84c08310be..a7b6a4ab3d1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVector.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -33,6 +35,8 @@ public class DoubleVector extends Pointer {
     public void clear() { resize(0); }
     public native void resize(@Cast("size_t") long n);
 
+    public double front() { return get(0); }
+    public double back() { return get(size() - 1); }
     @Index(function = "at") public native double get(@Cast("size_t") long i);
     public native DoubleVector put(@Cast("size_t") long i, double value);
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVectorOptional.java
index 5bfe7957324..7d818175e12 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVectorOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DoubleVectorOptional.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -26,6 +28,7 @@ public class DoubleVectorOptional extends Pointer {
     public native @Name("operator =") @ByRef DoubleVectorOptional put(@ByRef DoubleVectorOptional x);
 
     public native boolean has_value();
+    public native void reset();
     public native @Name("value") @Cast("std::vector<double>*") @ByRef DoubleVector get();
     @ValueSetter public native DoubleVectorOptional put(@Cast("std::vector<double>*") @ByRef DoubleVector value);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout.java
deleted file mode 100644
index 5401699074e..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-/** A {@code ModuleHolder} subclass for {@code DropoutImpl}.
- * See the documentation for {@code DropoutImpl} class to learn what methods it
- * provides, and examples of how to use {@code Dropout} with
- * {@code torch::nn::DropoutOptions}. See the documentation for {@code ModuleHolder} to
- * learn about PyTorch's module storage semantics. */
-@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class Dropout extends DropoutImplModuleHolder {
-    static { Loader.load(); }
-
-    public Dropout(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-    private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Dropout(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::DropoutImpl>"}) DropoutImpl module) { super((Pointer)null); allocate(module); }
-    private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::DropoutImpl>"}) DropoutImpl module);
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public Dropout(Pointer p) { super(p); }
-
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2d.java
deleted file mode 100644
index e04f4f89020..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2d.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-/** A {@code ModuleHolder} subclass for {@code Dropout2dImpl}.
- * See the documentation for {@code Dropout2dImpl} class to learn what methods it
- * provides, and examples of how to use {@code Dropout2d} with
- * {@code torch::nn::Dropout2dOptions}. See the documentation for {@code ModuleHolder} to
- * learn about PyTorch's module storage semantics. */
-@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class Dropout2d extends Dropout2dImplModuleHolder {
-    static { Loader.load(); }
-
-    public Dropout2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-    private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Dropout2d(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Dropout2dImpl>"}) Dropout2dImpl module) { super((Pointer)null); allocate(module); }
-    private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Dropout2dImpl>"}) Dropout2dImpl module);
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public Dropout2d(Pointer p) { super(p); }
-
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java
index 208692a7d3a..feef124b7a1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImpl.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -35,12 +37,12 @@ public class Dropout2dImpl extends Dropout2dImplBase {
 
     public Dropout2dImpl(double p) { super((Pointer)null); allocate(p); }
-    @NoDeallocator private native void allocate(double p);
+    private native void allocate(double p);
     public Dropout2dImpl(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_) { super((Pointer)null); allocate(options_); }
-    @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
+    private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
     public Dropout2dImpl() { super((Pointer)null); allocate(); }
-    @NoDeallocator private native void allocate();
+    private native void allocate();
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Dropout2dImpl(Pointer p) { super(p); }
     /** Native array allocator. Access with {@link Pointer#position(long)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java
index cbb53d6932c..7cbb9abdb2b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplBase.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -23,12 +25,12 @@ public class Dropout2dImplBase extends Dropout2dImplCloneable {
     public Dropout2dImplBase(Pointer p) { super(p); }
 
     public Dropout2dImplBase(double p) { super((Pointer)null); allocate(p); }
-    @NoDeallocator private native void allocate(double p);
+    private native void allocate(double p);
     public Dropout2dImplBase(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_) { super((Pointer)null); allocate(options_); }
-    @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
+    private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
     public Dropout2dImplBase() { super((Pointer)null); allocate(); }
-    @NoDeallocator private native void allocate();
+    private native void allocate();
 
     public native void reset();
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java
index 446b2a43bc7..2b01bdc4424 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -20,18 +22,18 @@ public class Dropout2dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Dropout2dImplCloneable(Pointer p) { super(p); }
+    @Override public Module asModule() { return asModule(this); }
+    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::Dropout2dImpl>>") Module asModule(@SharedPtr Dropout2dImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
      * semantics, most importantly parameters, buffers and submodules. */
     public native void reset();
 
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @Name("static_cast<torch::nn::Module*>") Module asModule(Dropout2dImplCloneable module);
 
     /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters
      * and submodules in the cloned module are different from those in the
      * original module. */
-    public native @SharedPtr Module clone(
-          @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
-    public native @SharedPtr Module clone();
+    public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
+          @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
+    public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplModuleHolder.java
deleted file mode 100644
index 0f1e8d885d1..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplModuleHolder.java
+++ /dev/null
@@ -1,79 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@Name("torch::nn::ModuleHolder<torch::nn::Dropout2dImpl>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class Dropout2dImplModuleHolder extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public Dropout2dImplModuleHolder(Pointer p) { super(p); }
-
-
-  ///
-
-  /** Default constructs the contained module if it has a default constructor,
-   * else produces a static error.
-   *
-   * NOTE: This uses the behavior of template
-   * classes in C++ that constructors (or any methods) are only compiled when
-   * actually used. */
-
-
-  /** Constructs the {@code ModuleHolder} with an empty contained value. Access to
-   * the underlying module is not permitted and will throw an exception, until
-   * a value is assigned. */
-  /* implicit */ public Dropout2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0);
-
-  /** Constructs the {@code ModuleHolder} with a contained module, forwarding all
-   * arguments to its constructor. */
-
-  /** Constructs the {@code ModuleHolder} from a pointer to the contained type.
-   * Example: {@code Linear(std::make_shared<LinearImpl>(...))}. */
-  /* implicit */ public Dropout2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Dropout2dImpl>"}) Dropout2dImpl module) { super((Pointer)null); allocate(module); }
-private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Dropout2dImpl>"}) Dropout2dImpl module);
-
-  /** Returns true if the {@code ModuleHolder} contains a module, or false if it is
-   * {@code nullptr}. */
-  public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean();
-
-  /** Forwards to the contained module. */
-  public native @Name("operator ->") Dropout2dImpl access();
-
-  /** Forwards to the contained module. */
-
-  /** Returns a reference to the contained module. */
-  public native @ByRef @Name("operator *") Dropout2dImpl multiply();
-
-  /** Returns a const reference to the contained module. */
-
-  /** Returns a shared pointer to the underlying module. */
-  public native @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Dropout2dImpl>"}) Dropout2dImpl ptr();
-
-  /** Returns a pointer to the underlying module. */
-  public native Dropout2dImpl get();
-
-  /** Returns a const pointer to the underlying module. */
-
-  /** Calls the {@code forward()} method of the contained module. */
-
-  /** Forwards to the subscript operator of the contained module.
-   * NOTE: std::forward is qualified to prevent VS2017 emitting
-   * error C2872: 'std': ambiguous symbol */
-
-  /** Returns true if the {@code ModuleHolder} does not contain a module. */
-  public native @Cast("bool") @NoException(true) boolean is_empty();
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3d.java
deleted file mode 100644
index c568e29215b..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3d.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-/** A {@code ModuleHolder} subclass for {@code Dropout3dImpl}.
- * See the documentation for {@code Dropout3dImpl} class to learn what methods it
- * provides, and examples of how to use {@code Dropout3d} with
- * {@code torch::nn::Dropout3dOptions}. See the documentation for {@code ModuleHolder} to
- * learn about PyTorch's module storage semantics. */
-@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class Dropout3d extends Dropout3dImplModuleHolder {
-    static { Loader.load(); }
-
-    public Dropout3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-    private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Dropout3d(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Dropout3dImpl>"}) Dropout3dImpl module) { super((Pointer)null); allocate(module); }
-    private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Dropout3dImpl>"}) Dropout3dImpl module);
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public Dropout3d(Pointer p) { super(p); }
-
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java
index 65d323be1db..0bed3a64b85 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImpl.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -35,12 +37,12 @@ public class Dropout3dImpl extends Dropout3dImplBase {
 
     public Dropout3dImpl(double p) { super((Pointer)null); allocate(p); }
-    @NoDeallocator private native void allocate(double p);
+    private native void allocate(double p);
    public Dropout3dImpl(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_) { super((Pointer)null); allocate(options_); }
-    @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
+    private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
     public Dropout3dImpl() { super((Pointer)null); allocate(); }
-    @NoDeallocator private native void allocate();
+    private native void allocate();
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Dropout3dImpl(Pointer p) { super(p); }
     /** Native array allocator. Access with {@link Pointer#position(long)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java
index ee62cbe69f1..e016f8a10a0 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplBase.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -23,12 +25,12 @@ public class Dropout3dImplBase extends Dropout3dImplCloneable {
     public Dropout3dImplBase(Pointer p) { super(p); }
 
     public Dropout3dImplBase(double p) { super((Pointer)null); allocate(p); }
-    @NoDeallocator private native void allocate(double p);
+    private native void allocate(double p);
     public Dropout3dImplBase(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_) { super((Pointer)null); allocate(options_); }
-    @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
+    private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
     public Dropout3dImplBase() { super((Pointer)null); allocate(); }
-    @NoDeallocator private native void allocate();
+    private native void allocate();
 
     public native void reset();
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java
index a9ebd2bd2b2..34280bd1b62 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -20,18 +22,18 @@ public class Dropout3dImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Dropout3dImplCloneable(Pointer p) { super(p); }
+    @Override public Module asModule() { return asModule(this); }
+    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::Dropout3dImpl>>") Module asModule(@SharedPtr Dropout3dImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
      * semantics, most importantly parameters, buffers and submodules. */
     public native void reset();
 
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @Name("static_cast<torch::nn::Module*>") Module asModule(Dropout3dImplCloneable module);
 
     /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters
      * and submodules in the cloned module are different from those in the
      * original module. */
-    public native @SharedPtr Module clone(
-          @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
-    public native @SharedPtr Module clone();
+    public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
+          @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
+    public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplModuleHolder.java
deleted file mode 100644
index e04337c324e..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplModuleHolder.java
+++ /dev/null
@@ -1,79 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@Name("torch::nn::ModuleHolder<torch::nn::Dropout3dImpl>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class Dropout3dImplModuleHolder extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public Dropout3dImplModuleHolder(Pointer p) { super(p); }
-
-
-  ///
-
-  /** Default constructs the contained module if it has a default constructor,
-   * else produces a static error.
-   *
-   * NOTE: This uses the behavior of template
-   * classes in C++ that constructors (or any methods) are only compiled when
-   * actually used. */
-
-
-  /** Constructs the {@code ModuleHolder} with an empty contained value. Access to
-   * the underlying module is not permitted and will throw an exception, until
-   * a value is assigned. */
-  /* implicit */ public Dropout3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0);
-
-  /** Constructs the {@code ModuleHolder} with a contained module, forwarding all
-   * arguments to its constructor. */
-
-  /** Constructs the {@code ModuleHolder} from a pointer to the contained type.
-   * Example: {@code Linear(std::make_shared<LinearImpl>(...))}. */
-  /* implicit */ public Dropout3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Dropout3dImpl>"}) Dropout3dImpl module) { super((Pointer)null); allocate(module); }
-private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Dropout3dImpl>"}) Dropout3dImpl module);
-
-  /** Returns true if the {@code ModuleHolder} contains a module, or false if it is
-   * {@code nullptr}. */
-  public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean();
-
-  /** Forwards to the contained module. */
-  public native @Name("operator ->") Dropout3dImpl access();
-
-  /** Forwards to the contained module. */
-
-  /** Returns a reference to the contained module. */
-  public native @ByRef @Name("operator *") Dropout3dImpl multiply();
-
-  /** Returns a const reference to the contained module. */
-
-  /** Returns a shared pointer to the underlying module. */
-  public native @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Dropout3dImpl>"}) Dropout3dImpl ptr();
-
-  /** Returns a pointer to the underlying module. */
-  public native Dropout3dImpl get();
-
-  /** Returns a const pointer to the underlying module. */
-
-  /** Calls the {@code forward()} method of the contained module. */
-
-  /** Forwards to the subscript operator of the contained module.
-   * NOTE: std::forward is qualified to prevent VS2017 emitting
-   * error C2872: 'std': ambiguous symbol */
-
-  /** Returns true if the {@code ModuleHolder} does not contain a module. */
-  public native @Cast("bool") @NoException(true) boolean is_empty();
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutFuncOptions.java
index 0fbcab36a2c..2b21e124e39 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutFuncOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutFuncOptions.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java
index c9ff59af511..2ed4626dddf 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImpl.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -35,12 +37,12 @@ public class DropoutImpl extends DropoutImplBase {
 
     public DropoutImpl(double p) { super((Pointer)null); allocate(p); }
-    @NoDeallocator private native void allocate(double p);
+    private native void allocate(double p);
     public DropoutImpl(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_) { super((Pointer)null); allocate(options_); }
-    @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
+    private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
     public DropoutImpl() { super((Pointer)null); allocate(); }
-    @NoDeallocator private native void allocate();
+    private native void allocate();
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public DropoutImpl(Pointer p) { super(p); }
     /** Native array allocator. Access with {@link Pointer#position(long)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java
index 52260b346e5..12321edffc3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplBase.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -23,12 +25,12 @@ public class DropoutImplBase extends DropoutImplCloneable {
     public DropoutImplBase(Pointer p) { super(p); }
 
     public DropoutImplBase(double p) { super((Pointer)null); allocate(p); }
-    @NoDeallocator private native void allocate(double p);
+    private native void allocate(double p);
     public DropoutImplBase(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_) { super((Pointer)null); allocate(options_); }
-    @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
+    private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_);
     public DropoutImplBase() { super((Pointer)null); allocate(); }
-    @NoDeallocator private native void allocate();
+    private native void allocate();
 
     public native void reset();
 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java
index b8b13311292..5243b179e30 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -20,18 +22,18 @@ public class DropoutImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public DropoutImplCloneable(Pointer p) { super(p); }
+    @Override public Module asModule() { return asModule(this); }
+    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module, torch::nn::Cloneable<torch::nn::DropoutImpl>>") Module asModule(@SharedPtr DropoutImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
      * semantics, most importantly parameters, buffers and submodules. */
     public native void reset();
 
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @Name("static_cast<torch::nn::Module*>") Module asModule(DropoutImplCloneable module);
 
     /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters
      * and submodules in the cloned module are different from those in the
      * original module. */
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplModuleHolder.java deleted file mode 100644 index 0255f49d18e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DropoutImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DropoutImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public DropoutImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public DropoutImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) DropoutImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) DropoutImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") DropoutImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") DropoutImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) DropoutImpl ptr(); - - /** Returns a pointer to the underlying module. 
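The clone() change at the top of this chunk switches the return to a by-value @SharedPtr("torch::nn::Module") wrapper. Both overloads come straight from that hunk; per its Javadoc, the result is a recursive deep copy:

    DropoutImpl a = new DropoutImpl(0.5);
    Module b = a.clone();   // deep copy: parameters, buffers, and submodules duplicated
    // The clone(DeviceOptional) overload from the same hunk additionally
    // places the copy on the given device.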
*/ - public native DropoutImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutOptions.java index 9bc75f9d278..8f74a3c1d72 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELU.java deleted file mode 100644 index 9213e9a23f6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELU.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ELUImpl}. - * See the documentation for {@code ELUImpl} class to learn what methods it - * provides, and examples of how to use {@code ELU} with {@code torch::nn::ELUOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ELU extends ELUImplModuleHolder { - static { Loader.load(); } - - public ELU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ELU(@SharedPtr @Cast({"", "std::shared_ptr"}) ELUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ELUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
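The file deletions here (DropoutImplModuleHolder above, ELU just below) are the visible edge of this patch removing the ModuleHolder wrapper layer from the Java API. A before/after sketch; the "before" lines use the holder constructor and get() accessor from the deleted code, and randn is the same assumed overload as in the first sketch:

    import org.bytedeco.pytorch.ELUImpl;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class HolderMigrationSketch {
        public static void main(String[] args) {
            Tensor x = randn(2, 2);       // assumed overload
            // Before (holder API this patch deletes):
            //   ELU elu = new ELU(new ELUImpl());
            //   Tensor y = elu.get().forward(x);
            // After: construct and call the Impl class directly.
            ELUImpl elu = new ELUImpl();  // default torch::nn::ELUOptions{}
            Tensor y = elu.forward(x);    // forward(Tensor), per the ELUImpl hunk below
            y.print();
        }
    }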
*/ - public ELU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java index bd78b4dfcec..33120d24748 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class ELUImpl extends ELUImplCloneable { } public ELUImpl(@Const @ByRef(nullValue = "torch::nn::ELUOptions{}") ELUOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::ELUOptions{}") ELUOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::ELUOptions{}") ELUOptions options_); public ELUImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java index bff779bb43b..3ac4f7f3853 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ELUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ELUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ELUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ELUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplModuleHolder.java deleted file mode 100644 index 1e61389d7c1..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ELUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ELUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ELUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ELUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ELUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ELUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ELUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ELUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ELUImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ELUImpl get(); - - /** Returns a const pointer to the underlying module. 
*/ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUOptions.java index 80847d1013e..50872c8c23f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Edge.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Edge.java index 80189dcb4c2..92b07b101ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Edge.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Edge.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java index 51f6e3c0fda..494ac501649 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EdgeVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class EdgeVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public Edge front() { return get(0); } + public Edge back() { return get(size() - 1); } @Index(function = "at") public native @ByRef Edge get(@Cast("size_t") long i); public native EdgeVector put(@Cast("size_t") long i, Edge value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EllipsisIndexType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EllipsisIndexType.java index 30d40de326f..9f53c24365e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EllipsisIndexType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EllipsisIndexType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package 
org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Embedding.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Embedding.java deleted file mode 100644 index 228cd351206..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Embedding.java +++ /dev/null @@ -1,42 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code EmbeddingImpl}. - * See the documentation for {@code EmbeddingImpl} class to learn what methods it - * provides, and examples of how to use {@code Embedding} with - * {@code torch::nn::EmbeddingOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Embedding extends EmbeddingImplModuleHolder { - static { Loader.load(); } - - public Embedding(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Embedding(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Embedding(Pointer p) { super(p); } - - - /** See the documentation for {@code torch::nn::EmbeddingFromPretrainedOptions} - * class to learn what optional arguments are supported for this function. 
*/ - public static native @ByVal Embedding from_pretrained( - @Const @ByRef Tensor embeddings, - @Const @ByRef(nullValue = "torch::nn::EmbeddingFromPretrainedOptions{}") EmbeddingFromPretrainedOptions options); - public static native @ByVal Embedding from_pretrained( - @Const @ByRef Tensor embeddings); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBag.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBag.java deleted file mode 100644 index 9c6fbf17da4..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBag.java +++ /dev/null @@ -1,42 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code EmbeddingBagImpl}. - * See the documentation for {@code EmbeddingBagImpl} class to learn what methods it - * provides, and examples of how to use {@code EmbeddingBag} with - * {@code torch::nn::EmbeddingBagOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class EmbeddingBag extends EmbeddingBagImplModuleHolder { - static { Loader.load(); } - - public EmbeddingBag(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public EmbeddingBag(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public EmbeddingBag(Pointer p) { super(p); } - - - /** See the documentation for {@code torch::nn::EmbeddingBagFromPretrainedOptions} - * class to learn what optional arguments are supported for this function. 
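Both deleted holder classes here carried static from_pretrained factories returning the holder by value. For reference, the calls this patch removes looked like the following from Java; the hunks do not show a replacement entry point, so none is assumed here, and randn is again an assumed overload:

    Tensor weights = randn(10, 4);  // 10 pretrained 4-d rows
    // Deleted holder API, per the signatures in the hunks above and below:
    //   Embedding    emb = Embedding.from_pretrained(weights);
    //   EmbeddingBag bag = EmbeddingBag.from_pretrained(weights, options);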
*/ - public static native @ByVal EmbeddingBag from_pretrained( - @Const @ByRef Tensor embeddings, - @Const @ByRef(nullValue = "torch::nn::EmbeddingBagFromPretrainedOptions{}") EmbeddingBagFromPretrainedOptions options); - public static native @ByVal EmbeddingBag from_pretrained( - @Const @ByRef Tensor embeddings); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFromPretrainedOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFromPretrainedOptions.java index 09f861f3b70..0cf2d2aadca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFromPretrainedOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFromPretrainedOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFuncOptions.java index fce12ff9c8f..c144b9b4229 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java index a9dccaf4e3d..c7a9f2cb2a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,9 +41,9 @@ public class EmbeddingBagImpl extends EmbeddingBagImplCloneable { public EmbeddingBagImpl(Pointer p) { super(p); } public EmbeddingBagImpl(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim) { super((Pointer)null); allocate(num_embeddings, embedding_dim); } - @NoDeallocator private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim); + @SharedPtr private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim); public EmbeddingBagImpl(@ByVal EmbeddingBagOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal EmbeddingBagOptions options_); + @SharedPtr private native 
void allocate(@ByVal EmbeddingBagOptions options_); public native void reset(); @@ -57,8 +59,8 @@ public class EmbeddingBagImpl extends EmbeddingBagImplCloneable { public native @ByVal Tensor forward( @Const @ByRef Tensor input, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor offsets, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor per_sample_weights); + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor offsets, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor per_sample_weights); public native @ByVal Tensor forward( @Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java index 98e372c5df9..953e07cee99 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class EmbeddingBagImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EmbeddingBagImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr EmbeddingBagImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(EmbeddingBagImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
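The EmbeddingBagImpl.forward hunk above only retargets the default null values from at::Tensor{} to torch::Tensor{}; a default-constructed Tensor from Java still stands in for both. A sketch using the constructor and forward signatures from that hunk, with arange, Scalar, and Tensor.mul assumed to follow their usual generated forms:

    import org.bytedeco.pytorch.EmbeddingBagImpl;
    import org.bytedeco.pytorch.Scalar;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class EmbeddingBagSketch {
        public static void main(String[] args) {
            EmbeddingBagImpl bag = new EmbeddingBagImpl(10, 3);        // 10 embeddings of dim 3
            Tensor input   = arange(new Scalar(6));                    // indices [0..5], int64 by default
            Tensor offsets = arange(new Scalar(2)).mul(new Scalar(3)); // bags start at [0, 3]
            // Empty tensors match the torch::Tensor{} defaults shown above.
            Tensor out = bag.forward(input, offsets, new Tensor());    // expect shape [2, 3]
            out.print();
        }
    }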
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplModuleHolder.java deleted file mode 100644 index cc7386de250..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class EmbeddingBagImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public EmbeddingBagImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public EmbeddingBagImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public EmbeddingBagImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") EmbeddingBagImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") EmbeddingBagImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native EmbeddingBagImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagMode.java index ce0cec35456..98392b87451 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagOptions.java index ceaa4e4bab3..7e699df1a6c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFromPretrainedOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFromPretrainedOptions.java index f16f1841926..fcf2271617a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFromPretrainedOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFromPretrainedOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFuncOptions.java index 69d2a44fb73..b58d86003d6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: 
DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java index 540be6f5fdd..46a68df8f0c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -38,9 +40,9 @@ public class EmbeddingImpl extends EmbeddingImplCloneable { public EmbeddingImpl(Pointer p) { super(p); } public EmbeddingImpl(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim) { super((Pointer)null); allocate(num_embeddings, embedding_dim); } - @NoDeallocator private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim); + @SharedPtr private native void allocate(@Cast("int64_t") long num_embeddings, @Cast("int64_t") long embedding_dim); public EmbeddingImpl(@ByVal EmbeddingOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal EmbeddingOptions options_); + @SharedPtr private native void allocate(@ByVal EmbeddingOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java index 98c1e9ab7b3..ef121bb9dea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class EmbeddingImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public EmbeddingImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr EmbeddingImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. 
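EmbeddingImpl gets the same @SharedPtr allocate treatment above, so it too is constructed directly. A fragment with the same imports as the EmbeddingBag sketch; forward is not shown in this excerpt and is assumed to take the index tensor:

    EmbeddingImpl emb = new EmbeddingImpl(100, 16);  // num_embeddings, embedding_dim from the hunk above
    Tensor idx  = arange(new Scalar(4));             // indices 0..3
    Tensor vecs = emb.forward(idx);                  // lookup; expect shape [4, 16]
    vecs.print();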
*/ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(EmbeddingImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplModuleHolder.java deleted file mode 100644 index 5582c0ab178..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class EmbeddingImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public EmbeddingImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public EmbeddingImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public EmbeddingImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") EmbeddingImpl access(); - - /** Forwards to the contained module. 
*/ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") EmbeddingImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native EmbeddingImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingOptions.java index b7f4eecf064..e2a4fb50e6a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnableProfilingGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnableProfilingGuard.java index 554a86cc765..b5455f43dd3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnableProfilingGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnableProfilingGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnabledStr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnabledStr.java new file mode 100644 index 00000000000..a15d04e5ffc --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnabledStr.java @@ -0,0 +1,31 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("torch::detail::SelectiveStr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class EnabledStr extends Pointer 
{ + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public EnabledStr(Pointer p) { super(p); } + + public EnabledStr(@Cast("const char*") BytePointer name) { super((Pointer)null); allocate(name); } + private native void allocate(@Cast("const char*") BytePointer name); + public EnabledStr(String name) { super((Pointer)null); allocate(name); } + private native void allocate(String name); + public native @Name("operator const char*") @Cast("const char*") BytePointer asBytePointer(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnforceFiniteError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnforceFiniteError.java index 8ff498066f7..4f73a01e4dd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnforceFiniteError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnforceFiniteError.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolder.java index 817c58800ad..0c7e46c94e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolder.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolder.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,10 +17,31 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("c10::ivalue") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) + +@Name("c10::ivalue::EnumHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class EnumHolder extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public EnumHolder() { super((Pointer)null); } + static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
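EnabledStr is a new binding of torch::detail::SelectiveStr<true>, the tag type libtorch uses for selectively registered operator names. Everything in this sketch comes from the new file above except the operator name, which is purely illustrative:

    EnabledStr schema = new EnabledStr("aten::my_op");      // hypothetical name, for illustration
    System.out.println(schema.asBytePointer().getString()); // operator const char* round-trip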
*/ public EnumHolder(Pointer p) { super(p); } + + public EnumHolder(@SharedPtr EnumType type, @StdString BytePointer name, @ByVal IValue value) { super((Pointer)null); allocate(type, name, value); } + private native void allocate(@SharedPtr EnumType type, @StdString BytePointer name, @ByVal IValue value); + public EnumHolder(@SharedPtr EnumType type, @StdString String name, @ByVal IValue value) { super((Pointer)null); allocate(type, name, value); } + private native void allocate(@SharedPtr EnumType type, @StdString String name, @ByVal IValue value); + + + + + + + + public native @StdString BytePointer qualifiedClassName(); + + + + public native @StdString BytePointer name(); + + public native @Const @ByRef IValue value(); + + public native @SharedPtr EnumType type(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolderPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolderPtr.java new file mode 100644 index 00000000000..89509290df6 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumHolderPtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class EnumHolderPtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public EnumHolderPtr(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public EnumHolderPtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public EnumHolderPtr position(long position) { + return (EnumHolderPtr)super.position(position); + } + @Override public EnumHolderPtr getPointer(long i) { + return new EnumHolderPtr((Pointer)this).offsetAddress(i); + } + + + public EnumHolderPtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public EnumHolderPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. 
+ // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public EnumHolderPtr(EnumHolder target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(EnumHolder target, @ByVal DontIncreaseRefcount arg1); + + + + public EnumHolderPtr(@ByRef(true) EnumHolderPtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) EnumHolderPtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) EnumHolderPtr put(@ByRef(true) EnumHolderPtr rhs); + + public native @NoException(true) EnumHolder get(); + + public native @ByRef @Name("operator *") @NoException(true) EnumHolder multiply(); + + public native @Name("operator ->") @NoException(true) EnumHolder access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef EnumHolderPtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into a intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) EnumHolder release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal EnumHolderPtr reclaim(EnumHolder owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal EnumHolderPtr reclaim_copy(EnumHolder owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside a intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal EnumHolderPtr unsafe_steal_from_new(EnumHolder raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. 
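The new EnumHolderPtr mirrors c10::intrusive_ptr<EnumHolder>, including the manual-ownership escape hatches documented in the hunk. A minimal sketch of the null state and the release()/reclaim() contract; the round-trip is left in comments because it needs a live EnumHolder to be meaningful:

    EnumHolderPtr p = new EnumHolderPtr();  // default constructor: a null intrusive_ptr
    System.out.println(p.defined());        // false, nothing is managed yet
    // Ownership round-trip per the docs above: release() returns the raw
    // pointer without touching the refcount, and it must go back through
    // reclaim(), or the object leaks.
    //   EnumHolder raw = p.release();
    //   EnumHolderPtr q = EnumHolderPtr.reclaim(raw);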
+ * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal EnumHolderPtr unsafe_adapt_non_heap_allocated( + EnumHolder raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal EnumHolderPtr unsafe_reclaim_from_nonowning(EnumHolder raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValue.java index c1010813460..66d78386e19 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValueArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValueArrayRef.java index a3a809a67fa..70baecfc320 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValueArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumNameValueArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class EnumNameValueArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public EnumNameValueArrayRef(@Const @ByRef EnumNameValue OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef EnumNameValue OneElt); + /** Construct an ArrayRef from a pointer and length. 
*/ public EnumNameValueArrayRef(@Const EnumNameValue data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -70,13 +71,13 @@ public class EnumNameValueArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") EnumNameValue begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") EnumNameValue end(); + public native @Const @ByPtr EnumNameValue begin(); + public native @Const @ByPtr EnumNameValue end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") EnumNameValue cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") EnumNameValue cend(); + public native @Const @ByPtr EnumNameValue cbegin(); + public native @Const @ByPtr EnumNameValue cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumType.java index 0c57035c55c..204d300d4cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EnumType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EnumType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -21,7 +23,7 @@ public class EnumType extends NamedType { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
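*
* A hypothetical inspection sketch (assumes an existing {@code EnumType t}):
*
* {@code
* BytePointer repr = t.str();                  // textual form of the type
* QualifiedName name = t.qualifiedClassName();
* }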
*/ public EnumType(Pointer p) { super(p); } - @MemberGetter public static native @Const @ByRef TypeKind Kind(); + @MemberGetter public static native TypeKind Kind(); public native @StdString BytePointer str(); @@ -33,7 +35,7 @@ public class EnumType extends NamedType { public native @Cast("bool") boolean isSubtypeOfExt(@Const @ByRef Type rhs, @Cast("std::ostream*") Pointer why_not); - public native @SharedPtr @Cast("const torch::jit::CompilationUnit*") CompilationUnit compilation_unit(); + public native @SharedPtr("const torch::jit::CompilationUnit") @ByVal CompilationUnit compilation_unit(); public native @Const @ByVal QualifiedName qualifiedClassName(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EqualType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EqualType.java deleted file mode 100644 index 9a38b6288d9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EqualType.java +++ /dev/null @@ -1,39 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class EqualType extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public EqualType() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public EqualType(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public EqualType(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public EqualType position(long position) { - return (EqualType)super.position(position); - } - @Override public EqualType getPointer(long i) { - return new EqualType((Pointer)this).offsetAddress(i); - } - - - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Error.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Error.java index babc5915239..2a6e790fec5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Error.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Error.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorReport.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorReport.java index c3e6c8cb068..c0ac8830349 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorReport.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ErrorReport.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,14 +21,16 @@ @Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ErrorReport extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
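+ *
+ * A hypothetical construction sketch (assumes an existing
+ * {@code SourceRange range} obtained during parsing):
+ *
+ * {@code
+ * ErrorReport err = new ErrorReport(range);
+ * }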
*/ + public ErrorReport(Pointer p) { super(p); } public ErrorReport(@Const @ByRef ErrorReport e) { super((Pointer)null); allocate(e); } private native void allocate(@Const @ByRef ErrorReport e); public ErrorReport(@ByVal SourceRange r) { super((Pointer)null); allocate(r); } private native void allocate(@ByVal SourceRange r); - public ErrorReport(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public ErrorReport(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public ErrorReport(@Const @ByRef Token tok) { super((Pointer)null); allocate(tok); } private native void allocate(@Const @ByRef Token tok); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Example.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Example.java index 1d0e7a8d30d..5f7f8440073 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Example.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Example.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java index a4abfec807b..fc3d63ab8b1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleCollation.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java index 4578368df14..27e10847c52 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleIterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleOptional.java index e12b09580c9..e5b21a2711e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT 
THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class ExampleOptional extends Pointer { public native @Name("operator =") @ByRef ExampleOptional put(@ByRef ExampleOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Example get(); @ValueSetter public native ExampleOptional put(@ByRef Example value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java index 643e96aabd5..f67ef9a2cd1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleStack.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java index ddd2e457537..454209e92d4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class ExampleVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public Example front() { return get(0); } + public Example back() { return get(size() - 1); } @Index(function = "at") public native @ByRef Example get(@Cast("size_t") long i); public native ExampleVector put(@Cast("size_t") long i, Example value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java deleted file mode 100644 index dcdfa898346..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorIterator.java +++ /dev/null @@ -1,44 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static 
org.bytedeco.pytorch.global.torch.*; - - @Name("torch::data::Iterator<std::vector<torch::data::Example<torch::Tensor,torch::Tensor> > >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ExampleVectorIterator extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ExampleVectorIterator(Pointer p) { super(p); } - - // Type aliases to make the class recognized as a proper iterator. - - /** Increments the iterator. - * Only permitted for valid iterators (not past the end). */ - public native @ByRef @Name("operator ++") ExampleVectorIterator increment(); - - /** Returns the current batch. - * Only permitted for valid iterators (not past the end). */ - public native @ByRef @Name("operator *") ExampleVector multiply(); - - /** Returns a pointer to the current batch. - * Only permitted for valid iterators (not past the end). */ - public native @Name("operator ->") ExampleVector access(); - - /** Compares two iterators for equality. */ - public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ExampleVectorIterator other); - - /** Compares two iterators for inequality. */ - public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef ExampleVectorIterator other); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java index 45f978725fa..44ce876c817 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class ExampleVectorOptional extends Pointer { public native @Name("operator =") @ByRef ExampleVectorOptional put(@ByRef ExampleVectorOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef ExampleVector get(); @ValueSetter public native ExampleVectorOptional put(@ByRef ExampleVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptionalIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptionalIterator.java index 8f74c62e3ff..9d7253dfb60 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptionalIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExampleVectorOptionalIterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionMessageValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionMessageValue.java index 1ea2231624e..b727869ee09 100644 ---
a/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionMessageValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionMessageValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionValue.java index 870c0a3cbc0..882a92de3e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExceptionValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -29,12 +31,7 @@ public class ExceptionValue extends SugaredValue { public native @StdString BytePointer kind(); - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef arg3, - @Cast("size_t") long arg4); + public native @StdString BytePointer message_(); public native ExceptionValue message_(BytePointer setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutionPlan.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutionPlan.java index 65045045af6..3a2e3981215 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutionPlan.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutionPlan.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,13 +35,13 @@ public class ExecutionPlan extends Pointer { public ExecutionPlan() { super((Pointer)null); allocate(); } private native void allocate(); - public ExecutionPlan(@SharedPtr @ByVal Graph graph, @StdString BytePointer function_name) { super((Pointer)null); allocate(graph, function_name); } - private native void allocate(@SharedPtr @ByVal Graph graph, @StdString BytePointer function_name); - public ExecutionPlan(@SharedPtr @ByVal Graph graph, @StdString String function_name) { super((Pointer)null); allocate(graph, function_name); } - private native void allocate(@SharedPtr @ByVal Graph graph, @StdString String function_name); + public ExecutionPlan(@SharedPtr("torch::jit::Graph") @ByVal Graph graph, @StdString BytePointer function_name) { super((Pointer)null); allocate(graph, function_name); } + private native void allocate(@SharedPtr("torch::jit::Graph") @ByVal Graph graph, @StdString BytePointer 
function_name); + public ExecutionPlan(@SharedPtr("torch::jit::Graph") @ByVal Graph graph, @StdString String function_name) { super((Pointer)null); allocate(graph, function_name); } + private native void allocate(@SharedPtr("torch::jit::Graph") @ByVal Graph graph, @StdString String function_name); public native @Cast("bool") @Name("operator bool") boolean asBoolean(); public native @ByRef Code code(); public native ExecutionPlan code(Code setter); - public native @SharedPtr @ByRef Graph graph(); public native ExecutionPlan graph(Graph setter); + public native @SharedPtr("torch::jit::Graph") @ByRef Graph graph(); public native ExecutionPlan graph(Graph setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutorExecutionModeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutorExecutionModeOptional.java index b4f32528177..76a3c1973b8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutorExecutionModeOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExecutorExecutionModeOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class ExecutorExecutionModeOptional extends Pointer { public native @Name("operator =") @ByRef ExecutorExecutionModeOptional put(@ByRef ExecutorExecutionModeOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef ExecutorExecutionMode get(); @ValueSetter public native ExecutorExecutionModeOptional put(@ByRef ExecutorExecutionMode value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java new file mode 100644 index 00000000000..7661ce7bd4e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExperimentalConfig.java @@ -0,0 +1,71 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("torch::profiler::impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ExperimentalConfig extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ExperimentalConfig(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public ExperimentalConfig(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public ExperimentalConfig position(long position) { + return (ExperimentalConfig)super.position(position); + } + @Override public ExperimentalConfig getPointer(long i) { + return new ExperimentalConfig((Pointer)this).offsetAddress(i); + } + + public ExperimentalConfig( + @ByVal(nullValue = "std::vector<std::string>{}") StringVector profiler_metrics, + @Cast("bool") boolean profiler_measure_per_kernel/*=false*/, + @Cast("bool") boolean verbose/*=false*/, + @ByVal(nullValue = "std::vector<std::string>{}") StringVector performance_events, + @Cast("bool") boolean adjust_timestamps/*=false*/) { super((Pointer)null); allocate(profiler_metrics, profiler_measure_per_kernel, verbose, performance_events, adjust_timestamps); } + private native void allocate( + @ByVal(nullValue = "std::vector<std::string>{}") StringVector profiler_metrics, + @Cast("bool") boolean profiler_measure_per_kernel/*=false*/, + @Cast("bool") boolean verbose/*=false*/, + @ByVal(nullValue = "std::vector<std::string>{}") StringVector performance_events, + @Cast("bool") boolean adjust_timestamps/*=false*/); + public ExperimentalConfig() { super((Pointer)null); allocate(); } + private native void allocate(); + public native @Cast("bool") @Name("operator bool") boolean asBoolean(); + + public native @ByRef StringVector profiler_metrics(); public native ExperimentalConfig profiler_metrics(StringVector setter); + public native @Cast("bool") boolean profiler_measure_per_kernel(); public native ExperimentalConfig profiler_measure_per_kernel(boolean setter); + public native @Cast("bool") boolean verbose(); public native ExperimentalConfig verbose(boolean setter); + /* + * List of performance events to be profiled. + * An empty list will disable performance event based profiling altogether. + */ + public native @ByRef StringVector performance_events(); public native ExperimentalConfig performance_events(StringVector setter); + /* + * Controls whether or not timestamp adjustment occurs after profiling. + * The purpose of this is to adjust Vulkan event timelines to align with those + * of their parent CPU events. + * This sometimes requires increasing CPU event durations (to fully contain + * their child events) and delaying CPU event start times (to + * prevent overlaps), so this should not be used unless Vulkan events are + * being profiled and it is ok to use this modified timestamp/duration + * information instead of the original information.
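+ *
+ * A hypothetical sketch of opting in through the generated accessors
+ * (the variable name is illustrative):
+ *
+ * ExperimentalConfig cfg = new ExperimentalConfig();
+ * cfg.adjust_timestamps(true); // only meaningful when Vulkan events are profiled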
+ */ + public native @Cast("bool") boolean adjust_timestamps(); public native ExperimentalConfig adjust_timestamps(boolean setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Expr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Expr.java index f15bbac21d6..b37623d7342 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Expr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Expr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,7 +21,9 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Expr extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Expr(Pointer p) { super(p); } - public Expr(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Expr(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprList.java new file mode 100644 index 00000000000..7b2e48e6b7a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprList.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::List") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ExprList extends TreeView { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public ExprList(Pointer p) { super(p); } + + + public ExprList(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal @Cast("torch::jit::List<torch::jit::Expr>::iterator*") ExprListIterator begin(); + public native @ByVal @Cast("torch::jit::List<torch::jit::Expr>::iterator*") ExprListIterator end(); + public native @Cast("bool") boolean empty(); + public native @ByVal @Name("operator []") Expr get(@Cast("size_t") long i); + + public static native @ByVal ExprList create(@Const @ByRef SourceRange range, @StdVector Expr subtrees); + public static native @ByVal ExprList unsafeCreate(@Const @ByRef SourceRange range, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector subtrees); + public native @Cast("size_t") long size(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprListIterator.java new file mode 100644 index 00000000000..777b455af7d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprListIterator.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + ++@Name("torch::jit::ListIterator<torch::jit::Expr>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ExprListIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
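+ *
+ * A hypothetical iteration sketch (assumes an existing {@code ExprList exprs};
+ * the Java method names map to the C++ iterator operators as noted):
+ *
+ * {@code
+ * for (ExprListIterator it = exprs.begin(); it.notEquals(exprs.end()); it.increment()) {
+ *     Expr e = it.multiply(); // operator*
+ * }
+ * }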
*/ + public ExprListIterator(Pointer p) { super(p); } + + public ExprListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it) { super((Pointer)null); allocate(it); } + private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef ExprListIterator rhs); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ExprListIterator rhs); + public native @ByVal @Name("operator *") Expr multiply(); + public native @ByRef @Name("operator +=") ExprListIterator addPut(@Cast("std::ptrdiff_t") long n); + public native @ByRef @Name("operator ++") ExprListIterator increment(); + public native @ByRef @Name("operator --") ExprListIterator decrement(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprMaybe.java index 58cff2a7663..85dc47e7394 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprMaybe.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprMaybe.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Name("torch::jit::Maybe") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ExprMaybe extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ExprMaybe(Pointer p) { super(p); } - public ExprMaybe(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public ExprMaybe(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); /* implicit */ public ExprMaybe(@Const @ByRef Expr tree) { super((Pointer)null); allocate(tree); } private native void allocate(@Const @ByRef Expr tree); public native @Cast("bool") boolean present(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprStmt.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprStmt.java index 6b41d309514..4a1ddbbc834 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExprStmt.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExprStmt.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ExprStmt extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
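+ *
+ * A hypothetical sketch (assumes an existing {@code SourceRange range}
+ * and {@code Expr e}):
+ *
+ * {@code
+ * ExprStmt stmt = ExprStmt.create(range, e);
+ * Expr inner = stmt.expr();
+ * }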
*/ + public ExprStmt(Pointer p) { super(p); } - public ExprStmt(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public ExprStmt(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr expr(); public static native @ByVal ExprStmt create(@Const @ByRef SourceRange range, @Const @ByRef Expr list); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ExtraFilesMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ExtraFilesMap.java index 3347fe27ce5..b876fbf6ec8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ExtraFilesMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ExtraFilesMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FanModeType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FanModeType.java index bf40ced50ac..a76cc46e3e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FanModeType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FanModeType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropout.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropout.java deleted file mode 100644 index 750c0193acc..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropout.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code FeatureAlphaDropoutImpl}. - * See the documentation for {@code FeatureAlphaDropoutImpl} class to learn what - * methods it provides, and examples of how to use {@code FeatureAlphaDropout} with - * {@code torch::nn::FeatureAlphaDropoutOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FeatureAlphaDropout extends FeatureAlphaDropoutImplModuleHolder { - static { Loader.load(); } - - public FeatureAlphaDropout(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public FeatureAlphaDropout(@SharedPtr @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public FeatureAlphaDropout(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutFuncOptions.java index ebbd22214bf..1fa34eaffc3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java index cab70b396fd..7465d0abfe1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,12 +34,12 @@ public class FeatureAlphaDropoutImpl extends FeatureAlphaDropoutImplBase { public FeatureAlphaDropoutImpl(double p) { super((Pointer)null); allocate(p); } - @NoDeallocator private native void allocate(double p); + private native void allocate(double p); public FeatureAlphaDropoutImpl(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_); + private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_); public FeatureAlphaDropoutImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + private native void allocate(); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FeatureAlphaDropoutImpl(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java index 9a8b2772100..f99b80c4d67 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,12 +25,12 @@ public class FeatureAlphaDropoutImplBase extends FeatureAlphaDropoutImplCloneabl public FeatureAlphaDropoutImplBase(Pointer p) { super(p); } public FeatureAlphaDropoutImplBase(double p) { super((Pointer)null); allocate(p); } - @NoDeallocator private native void allocate(double p); + private native void allocate(double p); public FeatureAlphaDropoutImplBase(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_); + private native void allocate(@Const @ByRef(nullValue = "torch::nn::DropoutOptions{}") DropoutOptions options_); public FeatureAlphaDropoutImplBase() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java index 9ea823e5075..250de2c87de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class FeatureAlphaDropoutImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FeatureAlphaDropoutImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FeatureAlphaDropoutImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. 
*/ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(FeatureAlphaDropoutImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplModuleHolder.java deleted file mode 100644 index 8e17556a0a1..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FeatureAlphaDropoutImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public FeatureAlphaDropoutImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public FeatureAlphaDropoutImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public FeatureAlphaDropoutImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. 
*/ - public native @Name("operator ->") FeatureAlphaDropoutImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") FeatureAlphaDropoutImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native FeatureAlphaDropoutImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FileLineFunc.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FileLineFunc.java new file mode 100644 index 00000000000..14c85bc8fdb --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FileLineFunc.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("torch::profiler::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FileLineFunc extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public FileLineFunc() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public FileLineFunc(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public FileLineFunc(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public FileLineFunc position(long position) { + return (FileLineFunc)super.position(position); + } + @Override public FileLineFunc getPointer(long i) { + return new FileLineFunc((Pointer)this).offsetAddress(i); + } + + public native @StdString BytePointer filename(); public native FileLineFunc filename(BytePointer setter); + public native @Cast("size_t") long line(); public native FileLineFunc line(long setter); + public native @StdString BytePointer funcname(); public native FileLineFunc funcname(BytePointer setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Flatten.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Flatten.java deleted file mode 100644 index 18c9179091c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Flatten.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code FlattenImpl}. - * See the documentation for {@code FlattenImpl} class to learn what methods it - * provides, and examples of how to use {@code Flatten} with - * {@code torch::nn::FlattenOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Flatten extends FlattenImplModuleHolder { - static { Loader.load(); } - - public Flatten(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Flatten(@SharedPtr @Cast({"", "std::shared_ptr"}) FlattenImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FlattenImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Flatten(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java index 1548c69f47d..ce3e26d90ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class FlattenImpl extends FlattenImplCloneable { } public FlattenImpl(@Const @ByRef(nullValue = "torch::nn::FlattenOptions{}") FlattenOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::FlattenOptions{}") FlattenOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::FlattenOptions{}") FlattenOptions options_); public FlattenImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java index 92f9773a8a3..db95e788eef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class FlattenImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FlattenImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FlattenImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(FlattenImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
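*
* A hypothetical sketch (assumes an existing {@code FlattenImplCloneable m}):
*
* {@code
* Module copy = m.clone(); // deep copy: parameters and buffers are duplicated
* }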
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplModuleHolder.java deleted file mode 100644 index 6d9de5a5d6d..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FlattenImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public FlattenImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public FlattenImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public FlattenImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) FlattenImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FlattenImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") FlattenImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") FlattenImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) FlattenImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native FlattenImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenOptions.java index 51e0f654984..65f4cf390d3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java index 22fa3dac074..ae0f3aa9ea8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,6 +22,15 @@ public class FloatArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FloatArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public FloatArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public FloatArrayRef position(long position) { + return (FloatArrayRef)super.position(position); + } + @Override public FloatArrayRef getPointer(long i) { + return new FloatArrayRef((Pointer)this).offsetAddress(i); + } /** \name Constructors * \{ @@ -30,8 +41,7 @@ public class FloatArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public FloatArrayRef(float OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(float OneElt); + /** Construct an ArrayRef from a pointer and length. 
*/ public FloatArrayRef(@Const FloatPointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -69,13 +79,13 @@ public class FloatArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") FloatPointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") FloatPointer end(); + public native @Const FloatPointer begin(); + public native @Const FloatPointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") FloatPointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") FloatPointer cend(); + public native @Const FloatPointer cbegin(); + public native @Const FloatPointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplex.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplex.java new file mode 100644 index 00000000000..6c745ea16fa --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplex.java @@ -0,0 +1,73 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::complex") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FloatComplex extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
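The FloatArrayRef hunk adds a pointer-plus-length constructor and simplifies begin()/end()/cbegin()/cend() to return plain const FloatPointer iterators instead of cast wrappers. A small sketch of the resulting usage (the FloatPointer varargs constructor is standard JavaCPP, not part of this diff):

    import org.bytedeco.javacpp.FloatPointer;
    import org.bytedeco.pytorch.FloatArrayRef;

    public class ArrayRefExample {
        public static void main(String[] args) {
            FloatPointer data = new FloatPointer(1f, 2f, 3f); // native float[3]
            FloatArrayRef view = new FloatArrayRef(data, 3);  // non-owning view
            FloatPointer it = view.begin();                   // plain const float*
            for (long i = 0; i < 3; i++) {
                System.out.println(it.get(i));
            }
        }
    }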
*/ + public FloatComplex(Pointer p) { super(p); } + + + public native float real_(); public native FloatComplex real_(float setter); + public native float imag_(); public native FloatComplex imag_(float setter); + + public FloatComplex() { super((Pointer)null); allocate(); } + private native void allocate(); + public FloatComplex(float re, float im/*=float()*/) { super((Pointer)null); allocate(re, im); } + private native void allocate(float re, float im/*=float()*/); + public FloatComplex(float re) { super((Pointer)null); allocate(re); } + private native void allocate(float re); +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif + + // Use SFINAE to specialize casting constructor for c10::complex and + // c10::complex + + public native @Const @ByRef @Name("operator =") FloatComplex put(float re); + + public native @Const @ByRef @Name("operator +=") FloatComplex addPut(float re); + + public native @Const @ByRef @Name("operator -=") FloatComplex subtractPut(float re); + + public native @Const @ByRef @Name("operator *=") FloatComplex multiplyPut(float re); + + public native @Const @ByRef @Name("operator /=") FloatComplex dividePut(float re); + +// #ifdef __APPLE__ +// #define FORCE_INLINE_APPLE __attribute__((always_inline)) +// #else +// #define FORCE_INLINE_APPLE +// #endif +// #undef FORCE_INLINE_APPLE + +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif + +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif + + // consistent with NumPy behavior + public native @Cast("bool") @Name("operator bool") boolean asBoolean(); + + public native @org.bytedeco.javacpp.annotation.Function float real(); + public native @org.bytedeco.javacpp.annotation.Function void real(float value); + public native @org.bytedeco.javacpp.annotation.Function float imag(); + public native @org.bytedeco.javacpp.annotation.Function void imag(float value); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplexrrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplexArrayRef.java similarity index 60% rename from pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplexrrayRef.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplexArrayRef.java index 67c359f315e..9f71a97467b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplexrrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatComplexArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,33 +18,38 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("c10::ArrayRef >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FloatComplexrrayRef extends Pointer { +public class FloatComplexArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public FloatComplexrrayRef(Pointer p) { super(p); } + public FloatComplexArrayRef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
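The new FloatComplex class maps c10::complex&lt;float&gt; with value constructors and the in-place arithmetic operators renamed to addPut/subtractPut/multiplyPut/dividePut. A short sketch using only the members declared above:

    import org.bytedeco.pytorch.FloatComplex;

    public class ComplexExample {
        public static void main(String[] args) {
            FloatComplex z = new FloatComplex(1.0f, 2.0f); // 1 + 2i
            z.addPut(3.0f);                                // operator +=  -> 4 + 2i
            z.multiplyPut(2.0f);                           // operator *=  -> 8 + 4i
            System.out.println(z.real() + " + " + z.imag() + "i");
        }
    }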
*/ - public FloatComplexrrayRef(long size) { super((Pointer)null); allocateArray(size); } + public FloatComplexArrayRef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); - @Override public FloatComplexrrayRef position(long position) { - return (FloatComplexrrayRef)super.position(position); + @Override public FloatComplexArrayRef position(long position) { + return (FloatComplexArrayRef)super.position(position); } - @Override public FloatComplexrrayRef getPointer(long i) { - return new FloatComplexrrayRef((Pointer)this).offsetAddress(i); + @Override public FloatComplexArrayRef getPointer(long i) { + return new FloatComplexArrayRef((Pointer)this).offsetAddress(i); } /** \name Constructors * \{

* Construct an empty ArrayRef. */ - /* implicit */ public FloatComplexrrayRef() { super((Pointer)null); allocate(); } + /* implicit */ public FloatComplexArrayRef() { super((Pointer)null); allocate(); } private native void allocate(); /** Construct an ArrayRef from a single element. */ // TODO Make this explicit + /** Construct an ArrayRef from a pointer and length. */ + public FloatComplexArrayRef(@Const FloatComplex data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Const FloatComplex data, @Cast("size_t") long length); /** Construct an ArrayRef from a range. */ + public FloatComplexArrayRef(@Const FloatComplex begin, @Const FloatComplex end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Const FloatComplex begin, @Const FloatComplex end); /** Construct an ArrayRef from a SmallVector. This is templated in order to * avoid instantiating SmallVectorTemplateCommon whenever we @@ -64,38 +71,46 @@ public class FloatComplexrrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef >::iterator*") FloatPointer begin(); - public native @ByVal @Cast("const c10::ArrayRef >::iterator*") FloatPointer end(); + public native @Const @ByPtr FloatComplex begin(); + public native @Const @ByPtr FloatComplex end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef >::const_iterator*") FloatPointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef >::const_iterator*") FloatPointer cend(); + public native @Const @ByPtr FloatComplex cbegin(); + public native @Const @ByPtr FloatComplex cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); + public native @Const FloatComplex data(); + /** size - Get the array size. */ public native @Cast("const size_t") long size(); /** front - Get the first element. */ + public native @Const @ByRef FloatComplex front(); /** back - Get the last element. */ + public native @Const @ByRef FloatComplex back(); /** equals - Check for element-wise equality. */ - public native @Cast("const bool") boolean equals(@ByVal FloatComplexrrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal FloatComplexArrayRef RHS); /** slice(n, m) - Take M elements of the array starting at element N */ - public native @Const @ByVal FloatComplexrrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + public native @Const @ByVal FloatComplexArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array. */ - public native @Const @ByVal FloatComplexrrayRef slice(@Cast("size_t") long N); + public native @Const @ByVal FloatComplexArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads * \{ */ + public native @Const @ByRef @Name("operator []") FloatComplex get(@Cast("size_t") long Index); /** Vector compatibility */ + + /// + public native @Const @ByRef FloatComplex at(@Cast("size_t") long Index); /** Disallow accidental assignment from a temporary. 
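The renamed FloatComplexArrayRef gains element access (get, at, front, back), slice, data, and vec. A sketch of indexing and slicing; it assumes c10::complex&lt;float&gt; is two packed floats (its real_/imag_ fields), so a FloatPointer can be reinterpreted as complex storage through the pointer-cast constructor — a layout assumption, not something this diff guarantees:

    import org.bytedeco.javacpp.FloatPointer;
    import org.bytedeco.pytorch.FloatComplex;
    import org.bytedeco.pytorch.FloatComplexArrayRef;

    public class ComplexArrayRefExample {
        public static void main(String[] args) {
            // Two complex values packed as (re, im) pairs -- layout assumption.
            FloatPointer raw = new FloatPointer(1f, 2f, 3f, 4f);
            FloatComplex data = new FloatComplex(raw);          // pointer-cast ctor
            FloatComplexArrayRef view = new FloatComplexArrayRef(data, 2);
            FloatComplex first = view.get(0);                   // operator []
            FloatComplexArrayRef rest = view.slice(1);          // drop first element
            System.out.println(first.real() + "+" + first.imag() + "i, rest: " + rest.size());
        }
    }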
* @@ -112,6 +127,7 @@ public class FloatComplexrrayRef extends Pointer { /** \} * \name Expensive Operations * \{ */ + public native @StdVector FloatComplex vec(); /** \} */ } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatOptional.java new file mode 100644 index 00000000000..83444e1e29c --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatOptional.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("c10::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FloatOptional extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public FloatOptional(Pointer p) { super(p); } + public FloatOptional(float value) { this(); put(value); } + public FloatOptional() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef FloatOptional put(@ByRef FloatOptional x); + + public native boolean has_value(); + public native void reset(); + public native @Name("value") float get(); + @ValueSetter public native FloatOptional put(float value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatType.java index 0c53d28db20..628ea261914 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,5 +29,5 @@ public class FloatType extends NumberType { public native @Cast("bool") boolean isSubtypeOfExt(@Const @ByRef Type rhs, @Cast("std::ostream*") Pointer why_not); @MemberGetter public static native TypeKind Kind(); // global singleton - + public static native @ByVal @Name("get") FloatTypePtr getFloatTypePtr(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatTypePtr.java index b8b4c183cde..8d76491331a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FloatTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FloatTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import 
org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Fold.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Fold.java deleted file mode 100644 index 9e1578ab15e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Fold.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code FoldImpl}. - * See the documentation for {@code FoldImpl} class to learn what methods it - * provides, and examples of how to use {@code Fold} with {@code torch::nn::FoldOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Fold extends FoldImplModuleHolder { - static { Loader.load(); } - - public Fold(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Fold(@SharedPtr @Cast({"", "std::shared_ptr"}) FoldImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FoldImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
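Returning to the FloatOptional binding introduced above: it wraps c10::optional&lt;float&gt; with has_value/get/put/reset, so Java callers can pass or clear "no value" explicitly. A minimal sketch:

    import org.bytedeco.pytorch.FloatOptional;

    public class OptionalExample {
        public static void main(String[] args) {
            FloatOptional opt = new FloatOptional(0.5f);
            if (opt.has_value()) {
                System.out.println(opt.get());   // 0.5
            }
            opt.reset();                         // back to c10::nullopt
            System.out.println(opt.has_value()); // false
        }
    }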
*/ - public Fold(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java index 22b1df31fce..f9fd42c89f2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class FoldImpl extends FoldImplCloneable { public FoldImpl(Pointer p) { super(p); } public FoldImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(output_size, kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + @SharedPtr private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public FoldImpl(@Const @ByRef FoldOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef FoldOptions options_); + @SharedPtr private native void allocate(@Const @ByRef FoldOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java index 5df0a5d6dd5..7d7c51ec54c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class FoldImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FoldImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FoldImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(FoldImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
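FoldImpl follows the same @SharedPtr allocation change; its constructor takes two torch::ExpandingArray&lt;2&gt; values, each passed as a LongPointer holding two packed int64_t. A construction-only sketch (forward() lies in the elided part of the hunk, so it is not shown):

    import org.bytedeco.javacpp.LongPointer;
    import org.bytedeco.pytorch.FoldImpl;

    public class FoldExample {
        public static void main(String[] args) {
            // ExpandingArray<2> maps to a LongPointer of two int64_t values.
            LongPointer outputSize = new LongPointer(4L, 5L);
            LongPointer kernelSize = new LongPointer(2L, 2L);
            FoldImpl fold = new FoldImpl(outputSize, kernelSize); // shared_ptr-managed
        }
    }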
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplModuleHolder.java deleted file mode 100644 index 13809505bbe..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FoldImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public FoldImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public FoldImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public FoldImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) FoldImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FoldImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") FoldImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") FoldImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) FoldImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native FoldImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldOptions.java index 8921cd475ba..71e9788ff44 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/For.java b/pytorch/src/gen/java/org/bytedeco/pytorch/For.java index 17bcf924244..e765c391e9d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/For.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/For.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,7 +21,17 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class For extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public For(Pointer p) { super(p); } - public For(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public For(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal ExprList targets(); + public native @ByVal ExprList itrs(); + public native @ByVal StmtList body(); + public static native @ByVal For create( + @Const @ByRef SourceRange range, + @Const @ByRef ExprList targets, + @Const @ByRef ExprList itrs, + @Const @ByRef StmtList body); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ForceDispatchKeyGuard.java similarity index 51% rename from pytorch/src/gen/java/org/bytedeco/pytorch/NodeGuard.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/ForceDispatchKeyGuard.java index 509f1a69b67..d643032145f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ForceDispatchKeyGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,11 +18,12 @@ import static org.bytedeco.pytorch.global.torch.*; -// Guard that sets and restores the evaluating node -@Namespace("torch::autograd") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NodeGuard extends Pointer { +@Namespace("c10::impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ForceDispatchKeyGuard extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
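The For binding above grows from an opaque TreeRef wrapper into a full torch::jit AST node with targets()/itrs()/body() accessors and a static create(). A sketch of reading one, assuming forStmt is a For node obtained from parsed TorchScript (how it is acquired is outside this excerpt):

    import org.bytedeco.pytorch.ExprList;
    import org.bytedeco.pytorch.For;
    import org.bytedeco.pytorch.StmtList;

    public class ForExample {
        static void inspect(For forStmt) {
            ExprList targets = forStmt.targets(); // loop variables
            ExprList itrs = forStmt.itrs();       // iterated expressions
            StmtList body = forStmt.body();       // loop body statements
            System.out.println("inspected a torch::jit for-loop node");
        }
    }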
*/ - public NodeGuard(Pointer p) { super(p); } + public ForceDispatchKeyGuard(Pointer p) { super(p); } + public ForceDispatchKeyGuard(@ByVal LocalDispatchKeySet key_set) { super((Pointer)null); allocate(key_set); } + private native void allocate(@ByVal LocalDispatchKeySet key_set); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardADLevel.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardADLevel.java index 0881d4bc322..f00845e329b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardADLevel.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardADLevel.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardGrad.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardGrad.java index 3678bc77a6b..fe57613ea88 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardGrad.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ForwardGrad.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool1dOptions.java index 7bf7691b41e..f7e30082298 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2d.java deleted file mode 100644 index 701f8bbcd33..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code 
ModuleHolder} subclass for {@code FractionalMaxPool2dImpl}. - * See the documentation for {@code FractionalMaxPool2dImpl} class to learn what - * methods it provides, and examples of how to use {@code FractionalMaxPool2d} with - * {@code torch::nn::FractionalMaxPool2dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FractionalMaxPool2d extends FractionalMaxPool2dImplModuleHolder { - static { Loader.load(); } - - public FractionalMaxPool2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public FractionalMaxPool2d(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public FractionalMaxPool2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java index 063d1652e94..7edc4eeb4e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,9 +39,9 @@ public class FractionalMaxPool2dImpl extends FractionalMaxPool2dImplCloneable { public FractionalMaxPool2dImpl(Pointer p) { super(p); } public FractionalMaxPool2dImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + @SharedPtr private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public FractionalMaxPool2dImpl(@ByVal FractionalMaxPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal FractionalMaxPool2dOptions options_); + @SharedPtr private native void allocate(@ByVal FractionalMaxPool2dOptions options_); public native void reset(); @@ -50,7 +52,7 @@ public class FractionalMaxPool2dImpl extends FractionalMaxPool2dImplCloneable { /** Returns the outputs and the indices of the max values. * Useful for {@code torch::nn::MaxUnpool2d} later. */ - public native @ByVal TensorTensorTuple forward_with_indices(@Const @ByRef Tensor input); + public native @ByVal T_TensorTensor_T forward_with_indices(@Const @ByRef Tensor input); /** The options with which this {@code Module} was constructed. 
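As the FractionalMaxPool2dImpl hunk shows, forward_with_indices() now returns the renamed tuple wrapper T_TensorTensor_T instead of TensorTensorTuple. A sketch of unpacking it; the get0()/get1() tuple accessors and the torch.randn(long...) factory come from the wider presets and are assumptions here, not part of this excerpt:

    import org.bytedeco.javacpp.LongPointer;
    import org.bytedeco.pytorch.FractionalMaxPool2dImpl;
    import org.bytedeco.pytorch.T_TensorTensor_T;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.randn;

    public class PoolExample {
        public static void main(String[] args) {
            // kernel_size is an ExpandingArray<2>, passed as two packed int64_t.
            // NOTE: real use must also set output_size or output_ratio via the options.
            FractionalMaxPool2dImpl pool = new FractionalMaxPool2dImpl(new LongPointer(3L, 3L));
            Tensor input = randn(1, 1, 8, 8);              // assumed randn(long...) overload
            T_TensorTensor_T out = pool.forward_with_indices(input);
            Tensor values = out.get0();                    // pooled outputs
            Tensor indices = out.get1();                   // indices for torch::nn::MaxUnpool2d
        }
    }

The FractionalMaxPool3d hunks further down apply the identical change for the 3d variant.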
*/ public native @ByRef FractionalMaxPool2dOptions options(); public native FractionalMaxPool2dImpl options(FractionalMaxPool2dOptions setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java index ed1322a9fcd..f5fb60d7f84 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class FractionalMaxPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public FractionalMaxPool2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FractionalMaxPool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(FractionalMaxPool2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplModuleHolder.java deleted file mode 100644 index 6ecd6f2d004..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FractionalMaxPool2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ - public FractionalMaxPool2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public FractionalMaxPool2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public FractionalMaxPool2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") FractionalMaxPool2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") FractionalMaxPool2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native FractionalMaxPool2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dOptions.java index 7f7bd3f5b55..18c5d30304e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3d.java deleted file mode 100644 index 6596ef61a37..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code FractionalMaxPool3dImpl}. - * See the documentation for {@code FractionalMaxPool3dImpl} class to learn what - * methods it provides, and examples of how to use {@code FractionalMaxPool3d} with - * {@code torch::nn::FractionalMaxPool3dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FractionalMaxPool3d extends FractionalMaxPool3dImplModuleHolder { - static { Loader.load(); } - - public FractionalMaxPool3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public FractionalMaxPool3d(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public FractionalMaxPool3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java index fbeadb84853..3dbd532e3a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,9 +39,9 @@ public class FractionalMaxPool3dImpl extends FractionalMaxPool3dImplCloneable { public FractionalMaxPool3dImpl(Pointer p) { super(p); } public FractionalMaxPool3dImpl(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); + @SharedPtr private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public FractionalMaxPool3dImpl(@ByVal FractionalMaxPool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal FractionalMaxPool3dOptions options_); + @SharedPtr private native void allocate(@ByVal FractionalMaxPool3dOptions options_); public native void reset(); @@ -50,7 +52,7 @@ public class FractionalMaxPool3dImpl extends FractionalMaxPool3dImplCloneable { /** Returns the outputs and the indices of the max values. * Useful for {@code torch::nn::MaxUnpool3d} later. */ - public native @ByVal TensorTensorTuple forward_with_indices(@Const @ByRef Tensor input); + public native @ByVal T_TensorTensor_T forward_with_indices(@Const @ByRef Tensor input); /** The options with which this {@code Module} was constructed. */ public native @ByRef FractionalMaxPool3dOptions options(); public native FractionalMaxPool3dImpl options(FractionalMaxPool3dOptions setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java index 327916944d5..aaef5baf793 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class FractionalMaxPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public FractionalMaxPool3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr FractionalMaxPool3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(FractionalMaxPool3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplModuleHolder.java deleted file mode 100644 index 8cb40aca1f6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FractionalMaxPool3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public FractionalMaxPool3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public FractionalMaxPool3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public FractionalMaxPool3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") FractionalMaxPool3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") FractionalMaxPool3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native FractionalMaxPool3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dOptions.java index 425005f80f0..e533ccad869 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java deleted file mode 100644 index 3ab7a535946..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FullDataLoaderOptions.java +++ /dev/null @@ -1,40 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** Like {@code DataLoaderOptions}, but without any unconfigured state. - * {@code DataLoaderOptions} has some options that depend on other options - * ({@code max_jobs} => {@code 2 * workers}). 
In the spirit of properly using the C++ type - * system, {@code DataLoaderOptions} allows only setting values. To access values, - * you must create a {@code FullDataLoaderOptions} from a {@code DataLoaderOptions} - * instance, which will do any necessary coalescing. */ -@Namespace("torch::data") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FullDataLoaderOptions extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public FullDataLoaderOptions(Pointer p) { super(p); } - - public FullDataLoaderOptions(@ByVal DataLoaderOptions options) { super((Pointer)null); allocate(options); } - private native void allocate(@ByVal DataLoaderOptions options); - - public native @Cast("size_t") long batch_size(); public native FullDataLoaderOptions batch_size(long setter); - public native @Cast("size_t") long workers(); public native FullDataLoaderOptions workers(long setter); - public native @Cast("size_t") long max_jobs(); public native FullDataLoaderOptions max_jobs(long setter); - public native @ByRef @Cast("c10::optional*") Pointer timeout(); public native FullDataLoaderOptions timeout(Pointer setter); - public native @Cast("bool") boolean enforce_ordering(); public native FullDataLoaderOptions enforce_ordering(boolean setter); - public native @Cast("bool") boolean drop_last(); public native FullDataLoaderOptions drop_last(boolean setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java new file mode 100644 index 00000000000..c826d754b4c --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuncTorchTLSBase.java @@ -0,0 +1,50 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// NOTE [functorch TLS in pytorch/pytorch] +// +// functorch lives out-of-tree. However, it has some TLS that needs to be +// propagated. The solution for that is we store a pointer to the TLS +// inside pytorch/pytorch and extend FuncTorchTLSBase inside functorch to +// include whatever functorch needs. +// +// We need to store a pointer due to the indirection: +// inside functorch, we will create a subclass of FunctorchTLSBase called +// FuncTorchTLSImpl that actually contains metadata, like the DynamicLayerStack. +// FuncTorchTLSBase doesn't have any metadata because it hasn't been defined +// yet. +// +// Here in pytorch/pytorch, we will pass around FuncTorchTLSBase*, but inside +// functorch, we will assign a FuncTorchTLSImpl* to the FunctorchTLSBase*. +// We can't directly pass around FunctorchTLSBase (without a pointer) because +// FuncTorchTLSImpl does not fit inside a FuncTorchTLSBase by virtue of having +// more elements. +@Namespace("at::functorch") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FuncTorchTLSBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ + public FuncTorchTLSBase(Pointer p) { super(p); } + + public native @UniquePtr FuncTorchTLSBase deepcopy(); + + public native @Cast("int64_t") long checkSupportsSingleLevelAutogradFunction(); + public native void checkSupportsInplaceRequiresGrad(); + public native void checkSupportsRetainGrad(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java index b1487598e82..3c07618ec99 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Function.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,6 +34,12 @@ public class Function extends Pointer { public native void run(@ByRef IValueVector stack); + public native @ByVal FuturePtr runAsync( + @ByRef IValueVector arg0, + @ByVal(nullValue = "torch::jit::TaskLauncher(at::launch)") @Cast("torch::jit::TaskLauncher*") Pointer taskLauncher); + public native @ByVal FuturePtr runAsync( + @ByRef IValueVector arg0); + public native @ByVal @Name("operator ()") IValue apply( @ByVal IValueVector stack, @Cast("const torch::jit::Kwargs*") @ByRef(nullValue = "torch::jit::Kwargs()") StringIValueMap kwargs); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java new file mode 100644 index 00000000000..e70e9e386eb --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java @@ -0,0 +1,98 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** To use custom autograd operations, implement a Function subclass with + * static forward and backward functions: + * + * {@code forward} can take as many arguments as you want and should return either a + * variable list or a Variable. Use of any direct Variable arguments will be + * registered in the graph but no vectors/sets or any other data structures + * will be traversed. You can use c10::optional as one of the arguments + * and it will be registered as a variable in the graph if the argument has a + * value. It should take a pointer to {@code torch::autograd::AutogradContext} as the + * first argument. 
Variables can be saved in the {@code ctx} using + * {@code ctx->save_for_backward} + * (see {@code torch::autograd::AutogradContext::save_for_backward}) and other data + * can be saved in the {@code ctx->saved_data} map + * (see {@code torch::autograd::AutogradContext::saved_data}) + * in the form of {@code } pairs. + * + * {@code backward} should take a pointer to {@code torch::autograd::AutogradContext} + * and a variable list containing as many Variables as there were outputs from + * {@code forward} as arguments. It should return as many Variables as there were + * inputs with each of them containing the gradient w.r.t. its corresponding + * input. Variables saved in {@code forward} can be accessed with + * {@code ctx->get_saved_variables} (see + * {@code torch::autograd::AutogradContext::get_saved_variables}) and other saved + * data can be accessed from {@code ctx->saved_data}. + * + * For example: + *

{@code
+ *  class MyFunction : public Function<MyFunction> {
+ *    public:
+ *    static variable_list forward(AutogradContext *ctx, int n, Variable var) {
+ *       // Save data for backward in context
+ *       ctx->saved_data["n"] = n;
+ *       var.mul_(2);
+ *       // Mark var as modified by inplace operation
+ *       ctx->mark_dirty({var});
+ *       return {var};
+ *    }
+ * 
+ *    static variable_list backward(AutogradContext *ctx, variable_list
+ *    grad_output) {
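+ *       // grad_output holds one gradient per output that forward() produced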
+ *       // Use data saved in forward
+ *       auto n = ctx->saved_data["n"].toInt();
+ *       return {grad_output[0]*n};
+ *    }
+ *  };
+ *  }
+ * + * To use {@code MyFunction}: + *
{@code
+ *  Variable x;
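+ *  // apply() invokes forward() and records the node in the autograd graph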
+ *  auto y = MyFunction::apply(6, x);
+ *  // Example backward call
+ *  y[0].sum().backward();
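+ *  // gradients w.r.t. x (if it requires grad) are accumulated into x.grad()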
+ *  }
*/ +@Name("torch::autograd::Function") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FunctionCrossMapLRN2d extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public FunctionCrossMapLRN2d() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public FunctionCrossMapLRN2d(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public FunctionCrossMapLRN2d(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public FunctionCrossMapLRN2d position(long position) { + return (FunctionCrossMapLRN2d)super.position(position); + } + @Override public FunctionCrossMapLRN2d getPointer(long i) { + return new FunctionCrossMapLRN2d((Pointer)this).offsetAddress(i); + } + + // We need to use a different template parameter than T here because T will + // inherit from Function, and when Function is instantiated, T::forward + // is not declared yet. + // The enable_if check is to ensure that the user doesn't explicitly provide + // the parameter X. +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java index 4af05b01da2..b8499221f1b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHook.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,7 +24,7 @@ public class FunctionPostHook extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public FunctionPostHook(Pointer p) { super(p); } - public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply( - @Cast({"", "std::vector"}) @StdMove TensorVector outputs, - @Cast({"", "std::vector"}) @StdMove TensorVector inputs); + public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply( + @Cast({"", "std::vector"}) @StdMove TensorVector outputs, + @Cast({"", "std::vector"}) @StdMove TensorVector inputs); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java index 271824dbaa4..3c560cafa14 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,8 @@ public class FunctionPostHookVector extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public FunctionPostHook front() { return get(0); } + public FunctionPostHook back() { return get(size() - 1); } @Index(function = "at") public native @UniquePtr @Cast({"", "std::unique_ptr&&"}) FunctionPostHook get(@Cast("size_t") long i); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java index 74795850f4b..1edde112cfc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHook.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,5 +24,5 @@ public class FunctionPreHook extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public FunctionPreHook(Pointer p) { super(p); } - public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply(@Cast({"", "std::vector"}) @StdMove TensorVector grads); + public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply(@Cast({"", "std::vector"}) @StdMove TensorVector grads); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java index f8e70842d4f..3a165e5802a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,8 @@ public class FunctionPreHookVector extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public FunctionPreHook front() { return get(0); } + public FunctionPreHook back() { return get(size() - 1); } @Index(function = "at") public native @UniquePtr @Cast({"", "std::unique_ptr&&"}) FunctionPreHook get(@Cast("size_t") long i); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java index 4a39fa2a1f8..a2f6424e7ce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -25,100 +27,100 @@ public class FunctionSchema extends Pointer { public FunctionSchema( @StdString BytePointer name, @StdString BytePointer overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns, + @StdVector Argument arguments, + @StdVector Argument returns, @Cast("bool") boolean is_vararg/*=false*/, @Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); } private native void allocate( @StdString BytePointer name, @StdString BytePointer overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns, + @StdVector Argument arguments, + @StdVector Argument returns, @Cast("bool") boolean is_vararg/*=false*/, @Cast("bool") boolean is_varret/*=false*/); public FunctionSchema( @StdString BytePointer name, @StdString BytePointer overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); } + @StdVector Argument arguments, + @StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); } private 
native void allocate( @StdString BytePointer name, @StdString BytePointer overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns); + @StdVector Argument arguments, + @StdVector Argument returns); public FunctionSchema( @StdString String name, @StdString String overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns, + @StdVector Argument arguments, + @StdVector Argument returns, @Cast("bool") boolean is_vararg/*=false*/, @Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); } private native void allocate( @StdString String name, @StdString String overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns, + @StdVector Argument arguments, + @StdVector Argument returns, @Cast("bool") boolean is_vararg/*=false*/, @Cast("bool") boolean is_varret/*=false*/); public FunctionSchema( @StdString String name, @StdString String overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); } + @StdVector Argument arguments, + @StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); } private native void allocate( @StdString String name, @StdString String overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns); + @StdVector Argument arguments, + @StdVector Argument returns); public FunctionSchema( @ByVal Symbol name, @StdString BytePointer overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns, + @StdVector Argument arguments, + @StdVector Argument returns, @Cast("bool") boolean is_vararg/*=false*/, @Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); } private native void allocate( @ByVal Symbol name, @StdString BytePointer overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns, + @StdVector Argument arguments, + @StdVector Argument returns, @Cast("bool") boolean is_vararg/*=false*/, @Cast("bool") boolean is_varret/*=false*/); public FunctionSchema( @ByVal Symbol name, @StdString BytePointer overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); } + @StdVector Argument arguments, + @StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); } private native void allocate( @ByVal Symbol name, @StdString BytePointer overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns); + @StdVector Argument arguments, + @StdVector Argument returns); public FunctionSchema( @ByVal Symbol name, @StdString String overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns, + @StdVector Argument arguments, + @StdVector Argument returns, @Cast("bool") boolean is_vararg/*=false*/, @Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); } private native void allocate( @ByVal Symbol name, @StdString String overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns, + @StdVector Argument arguments, + @StdVector Argument returns, @Cast("bool") boolean is_vararg/*=false*/, @Cast("bool") boolean is_varret/*=false*/); public FunctionSchema( @ByVal Symbol name, 
@StdString String overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); } + @StdVector Argument arguments, + @StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); } private native void allocate( @ByVal Symbol name, @StdString String overload_name, - @ByVal ArgumentVector arguments, - @ByVal ArgumentVector returns); + @StdVector Argument arguments, + @StdVector Argument returns); // Checks whether this schema is backward compatible with the old one. // The following conditions must be true: @@ -176,8 +178,8 @@ private native void allocate( public native @Const @ByRef OperatorName operator_name(); public native @StdString BytePointer name(); public native @StdString BytePointer overload_name(); - public native @Const @ByRef ArgumentVector arguments(); - public native @Const @ByRef ArgumentVector returns(); + public native @StdVector Argument arguments(); + public native @StdVector Argument returns(); public native @Cast("bool") boolean is_vararg(); public native @Cast("bool") boolean is_varret(); public native @Cast("bool") boolean is_aliasing(@Const @ByRef SchemaArgument argument); @@ -213,14 +215,14 @@ private native void allocate( // Returns either arguments() or returns() depending on the SchemaArgType // output => returns(), input => arguments() - public native @Const @ByRef ArgumentVector getCorrectList(SchemaArgType type); - public native @Const @ByRef ArgumentVector getCorrectList(@Cast("c10::SchemaArgType") int type); + public native @StdVector Argument getCorrectList(SchemaArgType type); + public native @StdVector Argument getCorrectList(@Cast("c10::SchemaArgType") int type); public native @ByVal IntOptional argumentIndexWithName(@ByVal @Cast("c10::string_view*") Pointer name); public native @ByVal FunctionSchema cloneWithName(@StdString BytePointer name, @StdString BytePointer overload_name); public native @ByVal FunctionSchema cloneWithName(@StdString String name, @StdString String overload_name); - public native @ByVal FunctionSchema cloneWithArguments(@ByVal ArgumentVector new_arguments); - public native @ByVal FunctionSchema cloneWithReturns(@ByVal ArgumentVector new_returns); + public native @ByVal FunctionSchema cloneWithArguments(@StdVector Argument new_arguments); + public native @ByVal FunctionSchema cloneWithReturns(@StdVector Argument new_returns); public native @StdString BytePointer formatTypeMismatchMsg( @Const @ByRef Argument expected, @@ -255,8 +257,9 @@ private native void allocate( // TODO remove the mutation here public native @Cast("bool") boolean isDefaultAliasAnalysisKind(); - public native @ByVal AliasAnalysisKind aliasAnalysis(); - public native void setAliasAnalysis(@ByVal AliasAnalysisKind v); + public native AliasAnalysisKind aliasAnalysis(); + public native void setAliasAnalysis(AliasAnalysisKind v); + public native void setAliasAnalysis(@Cast("c10::AliasAnalysisKind") byte v); public native @ByVal @Cast("c10::optional*") Pointer getNamespace(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaOptional.java index 2ef2cde78fb..3a886613630 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class FunctionSchemaOptional extends Pointer { public native @Name("operator =") @ByRef FunctionSchemaOptional put(@ByRef FunctionSchemaOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef FunctionSchema get(); @ValueSetter public native FunctionSchemaOptional put(@ByRef FunctionSchema value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaVector.java new file mode 100644 index 00000000000..b651c921384 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchemaVector.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FunctionSchemaVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public FunctionSchemaVector(Pointer p) { super(p); } + public FunctionSchemaVector() { allocate(); } + private native void allocate(); + + + public boolean empty() { return size() == 0; } + public native long size(); + + public FunctionSchema front() { return get(0); } + public FunctionSchema back() { return get(size() - 1); } + @Index(function = "at") public native @Const FunctionSchema get(@Cast("size_t") long i); + + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @Const FunctionSchema get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionType.java index d4633d32d92..82bda95ee9f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionValue.java index c41461b191f..891f33f5fb3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -24,17 +26,10 @@ public class FunctionValue extends SugaredValue { public FunctionValue(Function callee) { super((Pointer)null); allocate(callee); } private native void allocate(Function callee); - public FunctionValue(@Const @ByRef StrongFunctionPtr p) { super((Pointer)null); allocate(p); } - private native void allocate(@Const @ByRef StrongFunctionPtr p); public native @StdString BytePointer kind(); - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction f, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + public native @Const @ByRef FunctionVector callees(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionVector.java index d055dba197f..af590bcbf39 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO 
NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class FunctionVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public Function front() { return get(0); } + public Function back() { return get(size() - 1); } @Index(function = "at") public native Function get(@Cast("size_t") long i); public native FunctionVector put(@Cast("size_t") long i, Function value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionalityOffsetAndMask.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionalityOffsetAndMask.java index 545272f2c75..32ff9b06f7f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionalityOffsetAndMask.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionalityOffsetAndMask.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FusionStrategy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FusionStrategy.java index 6d696af6230..2dd3fd9d666 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FusionStrategy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FusionStrategy.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java index eebe41836d1..365c78f9e26 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -14,11 +16,122 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; + // namespace ivalue -@Namespace("c10::ivalue") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +// Future +@Name("c10::ivalue::Future") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Future extends Pointer { - /** Empty constructor. 
Calls {@code super((Pointer)null)}. */ - public Future() { super((Pointer)null); } + static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Future(Pointer p) { super(p); } + + + + + + + @NoOffset public static class FutureError extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public FutureError() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public FutureError(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public FutureError(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public FutureError position(long position) { + return (FutureError)super.position(position); + } + @Override public FutureError getPointer(long i) { + return new FutureError((Pointer)this).offsetAddress(i); + } + + + + + + public native @NoException(true) @Cast("const char*") BytePointer what(); + + public native @StdString BytePointer error_msg(); public native FutureError error_msg(BytePointer setter); + } + + /** + * Wait on the future until it completes. + */ + public native @Name("wait") void _wait(); + + /** + * Wait on the future until it completes and throw an + * exception if an error exists. + */ + public native void waitAndThrow(); + + /** + * Explicitly mark the future as completed with the output value. Optionally, + * the storages for all tensors in IValue can be passed as well. The DataPtrs + * of these storages are used to synchronize CUDA streams. If storages isn't + * given we will attempt to extract it from the value, if we need to (this + * happens if a non-empty set of devices was given to the constructor). Thus + * one only needs to provide storages when 1) they cannot be extracted through + * IValue::getSubValues() or through pickling in case of Python object; or + * when 2) customized storage extraction is more efficient. + */ + public native void markCompleted( + @ByVal IValue value, + @ByVal(nullValue = "c10::optional >(c10::nullopt)") WeakStorageVectorOptional storages); + public native void markCompleted( + @ByVal IValue value); + + public native void markCompleted(); + + public native void setError(@ByVal @Cast("std::exception_ptr*") Pointer eptr); + + public native void setErrorIfNeeded(@ByVal @Cast("std::exception_ptr*") Pointer eptr); + + // Get the result of the current future. + public native @ByVal IValue value(); + + // This accessor should only be used if we know that the future is + // completed() with no error. + public native @Const @ByRef IValue constValue(); + + // This accessor should only be used if we know that the future is + // completed() with no error. + public native @Const @ByRef WeakStorageVector storages(); + + /** + * Add a callback to the future. + * The callbacks will be executed once the future completes. + * If the future has already completed, + * this function will execute the callback immediately. + */ + + /** + * Add a callback to the future, and return another Future to hold the return + * value of the callback. This is necessary when the callback provider needs + * to know for sure when the callback has finished. + */ + + // Tries to retrieve the error message from std::exception_ptr. 
+ public native @StdString BytePointer tryRetrieveErrorMessage(); + + // Check if the current future has completed + public native @Cast("bool") boolean completed(); + + public native @Cast("bool") boolean hasValue(); + + public native @Cast("bool") boolean hasError(); + + public native @ByVal @Cast("std::exception_ptr*") Pointer exception_ptr(); + + + + public native @ByVal Type.TypePtr elementType(); + + public native @StdVector Device devices(); + + // This method should be used when one intends to manually create a child + // future, for example when implementing a customized version of then(). + public native @ByVal FuturePtr createInstance(@ByVal Type.TypePtr type); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtr.java new file mode 100644 index 00000000000..8f093ee4a12 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FuturePtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public FuturePtr(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public FuturePtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public FuturePtr position(long position) { + return (FuturePtr)super.position(position); + } + @Override public FuturePtr getPointer(long i) { + return new FuturePtr((Pointer)this).offsetAddress(i); + } + + + public FuturePtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public FuturePtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. 
+ // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public FuturePtr(Future target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(Future target, @ByVal DontIncreaseRefcount arg1); + + + + public FuturePtr(@ByRef(true) FuturePtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) FuturePtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) FuturePtr put(@ByRef(true) FuturePtr rhs); + + public native @NoException(true) Future get(); + + public native @ByRef @Name("operator *") @NoException(true) Future multiply(); + + public native @Name("operator ->") @NoException(true) Future access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef FuturePtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into a intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) Future release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal FuturePtr reclaim(Future owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal FuturePtr reclaim_copy(Future owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside a intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal FuturePtr unsafe_steal_from_new(Future raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. 
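+ *
+ * As a side note, a minimal sketch (from the Java side of these bindings) of
+ * the {@code release()}/{@code reclaim()} round-trip documented above, where
+ * {@code p} is a hypothetical, already-constructed {@code FuturePtr}:
+ * <pre>{@code
+ * Future raw = p.release();             // p gives up ownership; refcount not decreased
+ * FuturePtr q = FuturePtr.reclaim(raw); // takes ownership back; refcount not increased
+ * }</pre>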
+ * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal FuturePtr unsafe_adapt_non_heap_allocated( + Future raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal FuturePtr unsafe_reclaim_from_nonowning(Future raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrArrayRef.java new file mode 100644 index 00000000000..be7477f6ccf --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrArrayRef.java @@ -0,0 +1,133 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::ArrayRef >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FuturePtrArrayRef extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public FuturePtrArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public FuturePtrArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public FuturePtrArrayRef position(long position) { + return (FuturePtrArrayRef)super.position(position); + } + @Override public FuturePtrArrayRef getPointer(long i) { + return new FuturePtrArrayRef((Pointer)this).offsetAddress(i); + } + + /** \name Constructors + * \{ +

+ * Construct an empty ArrayRef. */ + /* implicit */ public FuturePtrArrayRef() { super((Pointer)null); allocate(); } +private native void allocate(); + + /** Construct an ArrayRef from a single element. */ + // TODO Make this explicit + + + /** Construct an ArrayRef from a pointer and length. */ + public FuturePtrArrayRef(@Const FuturePtr data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Const FuturePtr data, @Cast("size_t") long length); + + /** Construct an ArrayRef from a range. */ + public FuturePtrArrayRef(@Const FuturePtr begin, @Const FuturePtr end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Const FuturePtr begin, @Const FuturePtr end); + + /** Construct an ArrayRef from a SmallVector. This is templated in order to + * avoid instantiating SmallVectorTemplateCommon whenever we + * copy-construct an ArrayRef. */ + + /** Construct an ArrayRef from a std::vector. */ + // The enable_if stuff here makes sure that this isn't used for + // std::vector, because ArrayRef can't work on a std::vector + // bitfield. + + /** Construct an ArrayRef from a std::array */ + + /** Construct an ArrayRef from a C array. */ + + /** Construct an ArrayRef from a std::initializer_list. */ + /* implicit */ + + /** \} + * \name Simple Operations + * \{ */ + + public native @Const @ByPtr FuturePtr begin(); + public native @Const @ByPtr FuturePtr end(); + + // These are actually the same as iterator, since ArrayRef only + // gives you const iterators. + public native @Const @ByPtr FuturePtr cbegin(); + public native @Const @ByPtr FuturePtr cend(); + + /** empty - Check if the array is empty. */ + public native @Cast("const bool") boolean empty(); + + public native @Const FuturePtr data(); + + /** size - Get the array size. */ + public native @Cast("const size_t") long size(); + + /** front - Get the first element. */ + public native @Const @ByRef FuturePtr front(); + + /** back - Get the last element. */ + public native @Const @ByRef FuturePtr back(); + + /** equals - Check for element-wise equality. */ + public native @Cast("const bool") boolean equals(@ByVal FuturePtrArrayRef RHS); + + /** slice(n, m) - Take M elements of the array starting at element N */ + public native @Const @ByVal FuturePtrArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + + /** slice(n) - Chop off the first N elements of the array. */ + public native @Const @ByVal FuturePtrArrayRef slice(@Cast("size_t") long N); + + /** \} + * \name Operator Overloads + * \{ */ + public native @Const @ByRef @Name("operator []") FuturePtr get(@Cast("size_t") long Index); + + /** Vector compatibility */ + + /// + public native @Const @ByRef FuturePtr at(@Cast("size_t") long Index); + + /** Disallow accidental assignment from a temporary. + * + * The declaration here is extra complicated so that "arrayRef = {}" + * continues to select the move assignment operator. */ + + + /** Disallow accidental assignment from a temporary. + * + * The declaration here is extra complicated so that "arrayRef = {}" + * continues to select the move assignment operator. 
*/ + + + /** \} + * \name Expensive Operations + * \{ */ + public native @StdVector FuturePtr vec(); + + /** \} */ +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrElementReference.java new file mode 100644 index 00000000000..2f433b92efd --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrElementReference.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::impl::ListElementReference,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FuturePtrElementReference extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public FuturePtrElementReference(Pointer p) { super(p); } + + public native @Name("operator std::conditional_t >::type>::value,const c10::intrusive_ptr&,c10::intrusive_ptr >") @ByVal FuturePtr getFuturePtr(); + + + + + + // assigning another ref to this assigns the underlying value + + + public native @Const @ByRef IValue get(); + + + + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrList.java new file mode 100644 index 00000000000..5a79fc2e7af --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrList.java @@ -0,0 +1,239 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::List >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FuturePtrList extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public FuturePtrList(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public FuturePtrList(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public FuturePtrList position(long position) { + return (FuturePtrList)super.position(position); + } + @Override public FuturePtrList getPointer(long i) { + return new FuturePtrList((Pointer)this).offsetAddress(i); + } + + + /** + * Constructs an empty list. 
+ */ + public FuturePtrList() { super((Pointer)null); allocate(); } + private native void allocate(); + + /** + * Constructs a list with some initial values. + * Example: + * List a({2, 3, 4}); + */ + public FuturePtrList(@ByVal FuturePtrArrayRef initial_values) { super((Pointer)null); allocate(initial_values); } + private native void allocate(@ByVal FuturePtrArrayRef initial_values); + + /** + * Create a generic list with runtime type information. + * This only works for c10::impl::GenericList and is not part of the public API + * but only supposed to be used internally by PyTorch. + */ + + + public FuturePtrList(@Const @ByRef FuturePtrList arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef FuturePtrList arg0); + public native @ByRef @Name("operator =") FuturePtrList put(@Const @ByRef FuturePtrList arg0); + + /** + * Create a new List pointing to a deep copy of the same data. + * The List returned is a new list with separate storage. + * Changes in it are not reflected in the original list or vice versa. + */ + public native @ByVal FuturePtrList copy(); + + /** + * Returns the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + */ + public native @ByVal FuturePtr get(long pos); + + /** + * Moves out the element at the specified location pos and returns it, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * The list contains an invalid element at position pos afterwards. Any operations + * on it before re-setting it are invalid. + */ + public native @ByVal FuturePtr extract(long pos); + + /** + * Returns a reference to the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * + * You cannot store the reference, but you can read it and assign new values to it: + * + * List list = ...; + * list[2] = 5; + * int64_t v = list[1]; + */ + + + + + /** + * Assigns a new value to the element at location pos. + */ + public native void set(long pos, @ByVal FuturePtr value); + + /** + * Assigns a new value to the element at location pos. + */ + + /** + * Returns an iterator to the first element of the container. + * If the container is empty, the returned iterator will be equal to end(). + */ + public native @ByVal @Cast("c10::List >::iterator*") FuturePtrListIterator begin(); + + /** + * Returns an iterator to the element following the last element of the container. + * This element acts as a placeholder; attempting to access it results in undefined behavior. + */ + public native @ByVal @Cast("c10::List >::iterator*") FuturePtrListIterator end(); + + /** + * Checks if the container has no elements. + */ + public native @Cast("bool") boolean empty(); + + /** + * Returns the number of elements in the container + */ + public native long size(); + + /** + * Increase the capacity of the vector to a value that's greater or equal to new_cap. + */ + public native void reserve(long new_cap); + + /** + * Erases all elements from the container. After this call, size() returns zero. + * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated. + */ + public native void clear(); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. 
Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List >::iterator*") FuturePtrListIterator insert(@ByVal @Cast("c10::List >::iterator*") FuturePtrListIterator pos, @Const @ByRef FuturePtr value); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Inserts a new element into the container directly before pos. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void push_back(@Const @ByRef FuturePtr value); + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given list to the end of the container. Uses at most one memory allocation. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void append(@ByVal FuturePtrList lst); + + /** + * Appends the given element value to the end of the container. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Removes the element at pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List >::iterator*") FuturePtrListIterator erase(@ByVal @Cast("c10::List >::iterator*") FuturePtrListIterator pos); + + /** + * Removes the elements in the range [first, last). + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List >::iterator*") FuturePtrListIterator erase(@ByVal @Cast("c10::List >::iterator*") FuturePtrListIterator first, @ByVal @Cast("c10::List >::iterator*") FuturePtrListIterator last); + + /** + * Removes the last element of the container. + * Calling pop_back on an empty container is undefined. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void pop_back(); + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional default-inserted elements are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void resize(long count); + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional copies of value are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. 
+ */ + public native void resize(long count, @Const @ByRef FuturePtr value); + + /** + * Value equality comparison. This function implements Python-like semantics for + * equality: two lists with the same identity (e.g. same pointer) trivially + * compare equal, otherwise each element is compared for equality. + */ + + + + + /** + * Identity comparison. Returns true if and only if {@code rhs} represents the same + * List object as {@code this}. + */ + public native @Cast("bool") boolean is(@Const @ByRef FuturePtrList rhs); + + public native @StdVector FuturePtr vec(); + + /** + * Returns the number of Lists currently pointing to this same list. + * If this is the only instance pointing to this list, returns 1. + */ + // TODO Test use_count + public native @Cast("size_t") long use_count(); + + public native @ByVal Type.TypePtr elementType(); + + // See [unsafe set type] for why this exists. + public native void unsafeSetElementType(@ByVal Type.TypePtr t); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrListIterator.java new file mode 100644 index 00000000000..9649b0a3a93 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FuturePtrListIterator.java @@ -0,0 +1,84 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::impl::ListIterator,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class FuturePtrListIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public FuturePtrListIterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public FuturePtrListIterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public FuturePtrListIterator position(long position) { + return (FuturePtrListIterator)super.position(position); + } + @Override public FuturePtrListIterator getPointer(long i) { + return new FuturePtrListIterator((Pointer)this).offsetAddress(i); + } + + // C++17 friendly std::iterator implementation + + public FuturePtrListIterator() { super((Pointer)null); allocate(); } + private native void allocate(); + + public FuturePtrListIterator(@Const @ByRef FuturePtrListIterator arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef FuturePtrListIterator arg0); + public native @ByRef @Name("operator =") FuturePtrListIterator put(@Const @ByRef FuturePtrListIterator arg0); + + public native @ByRef @Name("operator ++") FuturePtrListIterator increment(); + + public native @ByVal @Name("operator ++") FuturePtrListIterator increment(int arg0); + + public native @ByRef @Name("operator --") FuturePtrListIterator decrement(); + + public native @ByVal @Name("operator --") FuturePtrListIterator decrement(int arg0); + + public native @ByRef @Name("operator +=") FuturePtrListIterator addPut(long offset); + + public native @ByRef @Name("operator -=") FuturePtrListIterator subtractPut(long offset); + + public native @ByVal @Name("operator +") FuturePtrListIterator add(long offset); + + public native @ByVal @Name("operator -") FuturePtrListIterator subtract(long offset); + + private static native @Namespace @Cast("c10::impl::ListIterator,c10::detail::ListImpl::list_type::iterator>::difference_type") @Name("operator -") long subtract(@Const @ByRef FuturePtrListIterator lhs, @Const @ByRef FuturePtrListIterator rhs); + public long subtract(FuturePtrListIterator rhs) { return subtract(this, rhs); } + + + + + + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef FuturePtrListIterator lhs, @Const @ByRef FuturePtrListIterator rhs); + public boolean equals(FuturePtrListIterator rhs) { return equals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef FuturePtrListIterator lhs, @Const @ByRef FuturePtrListIterator rhs); + public boolean notEquals(FuturePtrListIterator rhs) { return notEquals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef FuturePtrListIterator lhs, @Const @ByRef FuturePtrListIterator rhs); + public boolean lessThan(FuturePtrListIterator rhs) { return lessThan(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef FuturePtrListIterator lhs, @Const @ByRef FuturePtrListIterator rhs); + public boolean lessThanEquals(FuturePtrListIterator rhs) { return lessThanEquals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef FuturePtrListIterator lhs, @Const @ByRef FuturePtrListIterator rhs); + public boolean greaterThan(FuturePtrListIterator rhs) { return greaterThan(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef FuturePtrListIterator lhs, @Const @ByRef FuturePtrListIterator rhs); + public boolean greaterThanEquals(FuturePtrListIterator rhs) { return greaterThanEquals(this, rhs); } +} diff --git 
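For orientation, a minimal usage sketch of the FuturePtrList and FuturePtrListIterator bindings above (not part of the generated sources). It assumes a FuturePtr obtained elsewhere and a begin() accessor declared earlier in the generated class; everything else is taken from the methods shown in these hunks.

import org.bytedeco.pytorch.FuturePtr;
import org.bytedeco.pytorch.FuturePtrList;
import org.bytedeco.pytorch.FuturePtrListIterator;

public class FuturePtrListSketch {
    static void demo(FuturePtrList list, FuturePtr f) {
        list.push_back(f);                         // append; may invalidate iterators
        FuturePtrListIterator pos = list.begin();  // assumed accessor; refresh after mutation
        pos = list.insert(pos, f);                 // insert before the first element
        list.erase(pos);                           // remove it again
        list.pop_back();                           // drop the trailing element
        boolean sameObject = list.is(list);        // identity comparison, not value equality
        long owners = list.use_count();            // 1 when this is the only handle
    }
}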
a/pytorch/src/gen/java/org/bytedeco/pytorch/FutureSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FutureSingleElementType.java index 26e07997ca7..778d79efbfb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FutureSingleElementType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FutureSingleElementType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FutureType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FutureType.java index d19a003f63e..07d2bcbf16f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FutureType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FutureType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELU.java deleted file mode 100644 index c5eb46680d8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELU.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code GELUImpl}. - * See the documentation for {@code GELUImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GELU extends GELUImplModuleHolder { - static { Loader.load(); } - - public GELU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public GELU(@SharedPtr @Cast({"", "std::shared_ptr"}) GELUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GELUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public GELU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java index f6350f34acb..d6da93bfb8d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,9 +39,9 @@ public class GELUImpl extends GELUImplCloneable { } public GELUImpl(@ByVal(nullValue = "torch::nn::GELUOptions{}") GELUOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::GELUOptions{}") GELUOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::GELUOptions{}") GELUOptions options_); public GELUImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java index a894d09a671..3d8061d04a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class GELUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GELUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GELUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(GELUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplModuleHolder.java deleted file mode 100644 index db4309d90d9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GELUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public GELUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public GELUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public GELUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) GELUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GELUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") GELUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") GELUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) GELUImpl ptr(); - - /** Returns a pointer to the underlying module. 
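The net effect of the GELU hunks above: the ModuleHolder wrapper is deleted and GELUImpl is allocated as a shared_ptr, so user code works with the Impl class directly. A minimal sketch under that reading; the randn factory is an assumption borrowed from global torch, not something this patch shows.

import org.bytedeco.pytorch.GELUImpl;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class GeluSketch {
    public static void main(String[] args) {
        GELUImpl gelu = new GELUImpl();   // no-arg constructor from the GELUImpl hunk
        Tensor x = randn(2, 8);           // assumed tensor factory
        Tensor y = gelu.forward(x);       // same shape as x, GELU applied elementwise
    }
}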
*/ - public native GELUImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUOptions.java index 1ccff0a3421..6a9418353ce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLU.java deleted file mode 100644 index d19e7870d86..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLU.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code GLUImpl}. - * See the documentation for {@code GLUImpl} class to learn what methods it - * provides, and examples of how to use {@code GLU} with {@code torch::nn::GLUOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GLU extends GLUImplModuleHolder { - static { Loader.load(); } - - public GLU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public GLU(@SharedPtr @Cast({"", "std::shared_ptr"}) GLUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GLUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public GLU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java index 002345d8672..4f49d330f6e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class GLUImpl extends GLUImplCloneable { } public GLUImpl(@Const @ByRef(nullValue = "torch::nn::GLUOptions{}") GLUOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::GLUOptions{}") GLUOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::GLUOptions{}") GLUOptions options_); public GLUImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java index 66c4ee1a914..07edf9dc8ca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class GLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GLUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GLUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(GLUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplModuleHolder.java deleted file mode 100644 index 73d0487bfe1..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GLUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public GLUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public GLUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public GLUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) GLUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GLUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") GLUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") GLUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) GLUImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native GLUImpl get(); - - /** Returns a const pointer to the underlying module. 
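The clone() signature change above (returning the shared_ptr by value) and the new asModule() cast recur in every Cloneable class in this patch. A short sketch of the caller's side, using only constructors and methods shown in the GLU hunks:

import org.bytedeco.pytorch.GLUImpl;
import org.bytedeco.pytorch.Module;

public class CloneSketch {
    public static void main(String[] args) {
        GLUImpl glu = new GLUImpl();   // no-arg constructor from the GLUImpl hunk
        Module base = glu.asModule();  // shared_ptr cast to the torch::nn::Module base
        Module copy = glu.clone();     // deep copy: parameters and buffers are not shared
    }
}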
*/ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUOptions.java index df18bdbdbc4..1263dd09fcf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRU.java deleted file mode 100644 index 17014e5fb97..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRU.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code GRUImpl}. - * See the documentation for {@code GRUImpl} class to learn what methods it - * provides, and examples of how to use {@code GRU} with {@code torch::nn::GRUOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GRU extends GRUImplModuleHolder { - static { Loader.load(); } - - public GRU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public GRU(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public GRU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCell.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCell.java deleted file mode 100644 index 895b0a67514..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCell.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code GRUCellImpl}. - * See the documentation for {@code GRUCellImpl} class to learn what methods it - * provides, and examples of how to use {@code GRUCell} with - * {@code torch::nn::GRUCellOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GRUCell extends GRUCellImplModuleHolder { - static { Loader.load(); } - - public GRUCell(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public GRUCell(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUCellImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUCellImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public GRUCell(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java index 275f25eda48..b79e6521857 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,11 +39,11 @@ public class GRUCellImpl extends GRUCellImplBase { public GRUCellImpl(Pointer p) { super(p); } public GRUCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @NoDeallocator private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public GRUCellImpl(@Const @ByRef GRUCellOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef GRUCellOptions options_); + @SharedPtr private native void allocate(@Const @ByRef GRUCellOptions options_); - public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByVal(nullValue = "at::Tensor{}") Tensor hx); + public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::Tensor{}") Tensor hx); public native @ByVal Tensor forward(@Const @ByRef Tensor input); public native @ByRef GRUCellOptions options(); public native GRUCellImpl options(GRUCellOptions setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java index 54b1f935d94..2a094ae44bf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,7 +24,7 @@ public class GRUCellImplBase extends GRUCellImplCloneable { public GRUCellImplBase(Pointer p) { super(p); } public GRUCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef RNNCellOptionsBase options_); + private native void allocate(@Const @ByRef RNNCellOptionsBase options_); /** Initializes the parameters of the RNNCell module. 
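GRUCellImpl.forward above takes an optional hx that now defaults to an empty torch::Tensor. A sketch of stepping a cell while threading the hidden state through, with made-up sizes; randn is again an assumed factory:

import org.bytedeco.pytorch.GRUCellImpl;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class GruCellSketch {
    public static void main(String[] args) {
        GRUCellImpl cell = new GRUCellImpl(10, 20); // input_size=10, hidden_size=20
        Tensor x = randn(3, 10);                    // batch of 3
        Tensor h = cell.forward(x);                 // omitted hx -> empty tensor default
        for (int t = 0; t < 4; t++) {
            h = cell.forward(x, h);                 // feed the hidden state back in
        }
    }
}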
*/ public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java index 83115fc839f..eef3d754108 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class GRUCellImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GRUCellImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GRUCellImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(GRUCellImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplModuleHolder.java deleted file mode 100644 index 3795a34d9b2..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GRUCellImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public GRUCellImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. 
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public GRUCellImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public GRUCellImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUCellImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUCellImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") GRUCellImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") GRUCellImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) GRUCellImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native GRUCellImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellOptions.java index 76a677f6bfd..a26f807a6ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java index a5e7aab4664..37696eff60e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,16 +39,16 @@ public class GRUImpl extends GRUImplBase { public GRUImpl(Pointer p) { super(p); } public GRUImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @NoDeallocator private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public GRUImpl(@Const @ByRef GRUOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef GRUOptions options_); + @SharedPtr private native void allocate(@Const @ByRef GRUOptions options_); - public native @ByVal TensorTensorTuple forward(@Const @ByRef Tensor input, @ByVal(nullValue = "at::Tensor{}") Tensor hx); - public native @ByVal TensorTensorTuple forward(@Const @ByRef Tensor input); - public native @ByVal PackedSequenceTensorTuple forward_with_packed_input( + public native @ByVal T_TensorTensor_T forward(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::Tensor{}") Tensor hx); + public native @ByVal T_TensorTensor_T forward(@Const @ByRef Tensor input); + public native @ByVal T_PackedSequenceTensor_T forward_with_packed_input( @Const @ByRef PackedSequence packed_input, - @ByVal(nullValue = "at::Tensor{}") Tensor hx); - public native @ByVal PackedSequenceTensorTuple forward_with_packed_input( + @ByVal(nullValue = "torch::Tensor{}") Tensor hx); + public native @ByVal T_PackedSequenceTensor_T forward_with_packed_input( @Const @ByRef PackedSequence packed_input); public native @ByRef GRUOptions options(); public native GRUImpl options(GRUOptions setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java index 94522ec6349..d5b5dfe42ec 100644 --- 
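GRUImpl.forward now returns the renamed tuple type T_TensorTensor_T (output, final hidden state). A sketch of unpacking it, assuming the get0()/get1() accessors these generated tuple wrappers usually expose; randn is an assumed factory:

import org.bytedeco.pytorch.GRUImpl;
import org.bytedeco.pytorch.T_TensorTensor_T;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class GruSketch {
    public static void main(String[] args) {
        GRUImpl gru = new GRUImpl(10, 20);          // input_size=10, hidden_size=20
        Tensor input = randn(5, 3, 10);             // (seq_len, batch, input_size)
        T_TensorTensor_T out = gru.forward(input);  // hx defaults to an empty tensor
        Tensor output = out.get0();                 // (seq_len, batch, hidden_size)
        Tensor hn = out.get1();                     // final hidden state
    }
}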
a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,7 +24,7 @@ public class GRUImplBase extends GRUImplCloneable { public GRUImplBase(Pointer p) { super(p); } public GRUImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef RNNOptionsBase options_); + private native void allocate(@Const @ByRef RNNOptionsBase options_); /** Initializes the parameters of the RNN module. */ public native void reset(); @@ -53,7 +55,7 @@ public class GRUImplBase extends GRUImplCloneable { * called once upon construction, inside {@code reset()}. */ public native void flatten_parameters(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector all_weights(); + public native @Cast({"", "std::vector"}) @StdMove TensorVector all_weights(); /** The RNN's options. */ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java index d83b4afa985..045548107cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class GRUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GRUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GRUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(GRUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
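all_weights() above now returns a TensorVector without the extra cast noise. A small sketch of iterating it, assuming the size()/get() accessors JavaCPP generates for std::vector adapters:

import org.bytedeco.pytorch.GRUImpl;
import org.bytedeco.pytorch.Tensor;
import org.bytedeco.pytorch.TensorVector;

public class WeightsSketch {
    public static void main(String[] args) {
        GRUImpl gru = new GRUImpl(10, 20);
        TensorVector weights = gru.all_weights();   // w_ih/w_hh/b_ih/b_hh per layer
        for (long i = 0; i < weights.size(); i++) {
            Tensor w = weights.get(i);              // assumed std::vector accessors
        }
    }
}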
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplModuleHolder.java deleted file mode 100644 index 0b8832d3243..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GRUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public GRUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public GRUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public GRUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GRUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") GRUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") GRUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) GRUImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native GRUImpl get(); - - /** Returns a const pointer to the underlying module. 
*/ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUOptions.java index cf7c937ed63..ca49e417c6c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java index 44e241a7cd9..8dd29e67335 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Generator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -49,7 +51,7 @@ * forks into other threads). */ -@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("at") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Generator extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -67,15 +69,20 @@ public class Generator extends Pointer { public Generator() { super((Pointer)null); allocate(); } private native void allocate(); + public Generator(@ByVal GeneratorImplPtr gen_impl) { super((Pointer)null); allocate(gen_impl); } + private native void allocate(@ByVal GeneratorImplPtr gen_impl); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Generator rhs); public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef Generator rhs); public native @Cast("bool") boolean defined(); - public native @Cast("c10::GeneratorImpl*") Pointer unsafeGetGeneratorImpl(); + public native GeneratorImpl unsafeGetGeneratorImpl(); + + public native GeneratorImpl unsafeReleaseGeneratorImpl(); - public native @Cast("c10::GeneratorImpl*") Pointer unsafeReleaseGeneratorImpl(); + public native @Const @ByRef GeneratorImplPtr getIntrusivePtr(); public native void set_current_seed(@Cast("uint64_t") long seed); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java new file mode 100644 index 00000000000..ad4d1087ba7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImpl.java @@ -0,0 +1,51 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class GeneratorImpl extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
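Generator now exposes its GeneratorImpl directly instead of a raw Pointer, plus a constructor from GeneratorImplPtr. A sketch using only methods shown in this hunk; note that a default-constructed Generator is empty, so defined() gates everything:

import org.bytedeco.pytorch.Generator;
import org.bytedeco.pytorch.GeneratorImpl;

public class GeneratorSketch {
    static void reseed(Generator gen) {
        if (gen.defined()) {                                   // empty generators hold no impl
            gen.set_current_seed(42L);
            GeneratorImpl impl = gen.unsafeGetGeneratorImpl(); // non-owning peek at the impl
        }
    }
}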
*/ + public GeneratorImpl(Pointer p) { super(p); } + + // Constructors + + // Delete all copy and move assignment in favor of clone() + // method + + + + public native @ByVal @Name("clone") GeneratorImplPtr clonePtr(); + + // Common methods for all generators + public native void set_current_seed(@Cast("uint64_t") long seed); + public native @Cast("uint64_t") long current_seed(); + public native @Cast("uint64_t") long seed(); + public native void set_state(@Const @ByRef TensorImpl new_state); + public native @ByVal TensorImplPtr get_state(); + public native @ByVal Device device(); + + // See Note [Acquire lock when using random generators] + + public native @ByVal DispatchKeySet key_set(); + + public native @NoException(true) void set_pyobj(@Cast("PyObject*") Pointer pyobj); + + public native @Cast("PyObject*") @NoException(true) Pointer pyobj(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImplPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImplPtr.java new file mode 100644 index 00000000000..8f0491c3fd4 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorImplPtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class GeneratorImplPtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public GeneratorImplPtr(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public GeneratorImplPtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public GeneratorImplPtr position(long position) { + return (GeneratorImplPtr)super.position(position); + } + @Override public GeneratorImplPtr getPointer(long i) { + return new GeneratorImplPtr((Pointer)this).offsetAddress(i); + } + + + public GeneratorImplPtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public GeneratorImplPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. 
+ // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public GeneratorImplPtr(GeneratorImpl target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(GeneratorImpl target, @ByVal DontIncreaseRefcount arg1); + + + + public GeneratorImplPtr(@ByRef(true) GeneratorImplPtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) GeneratorImplPtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) GeneratorImplPtr put(@ByRef(true) GeneratorImplPtr rhs); + + public native @NoException(true) GeneratorImpl get(); + + public native @ByRef @Name("operator *") @NoException(true) GeneratorImpl multiply(); + + public native @Name("operator ->") @NoException(true) GeneratorImpl access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef GeneratorImplPtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into a intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) GeneratorImpl release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal GeneratorImplPtr reclaim(GeneratorImpl owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal GeneratorImplPtr reclaim_copy(GeneratorImpl owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside a intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal GeneratorImplPtr unsafe_steal_from_new(GeneratorImpl raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. 
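The release()/reclaim() contract documented above is easy to get wrong, so here is the one safe round trip, using only methods declared in this class:

import org.bytedeco.pytorch.GeneratorImpl;
import org.bytedeco.pytorch.GeneratorImplPtr;

public class ReclaimSketch {
    static void roundTrip(GeneratorImplPtr ptr) {
        if (!ptr.defined()) return;
        GeneratorImpl raw = ptr.release();     // owning raw pointer; ptr is now invalid
        // ... hand `raw` through a C-style API ...
        GeneratorImplPtr back = GeneratorImplPtr.reclaim(raw); // reclaim exactly once
        long owners = back.use_count();        // refcount was never touched in between
    }
}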
+ * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal GeneratorImplPtr unsafe_adapt_non_heap_allocated( + GeneratorImpl raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal GeneratorImplPtr unsafe_reclaim_from_nonowning(GeneratorImpl raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorOptional.java index 29a49f5486d..7cc486c05e0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class GeneratorOptional extends Pointer { public native @Name("operator =") @ByRef GeneratorOptional put(@ByRef GeneratorOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Generator get(); @ValueSetter public native GeneratorOptional put(@ByRef Generator value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorType.java index e949be18536..33f736325cf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorTypePtr.java index b09a69c6337..1cdbaf5a583 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GeneratorTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; 
import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java index 1033a90f60b..4ff27f2537e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,7 +17,24 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("c10::Dict") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) + +/** + * An object of this class stores a map from Key to Value. + * + * This is a pointer type. After a copy, both Dicts + * will share the same storage: + * + * > Dict a; + * > Dict b = a; + * > b.insert(3, "three"); + * > ASSERT("three" == a.at(3)); + * + * We use this class in the PyTorch kernel API because that + * allows us to do optimizations and switch out the underlying + * map implementation without breaking backwards compatibility + * for the kernel API. + */ +@Name("c10::Dict") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class GenericDict extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictEntryRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictEntryRef.java index d5a88719312..f48b005d330 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictEntryRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictEntryRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictIterator.java index ab1890ac0c7..ba9d833d791 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictIterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDictIterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,4 +38,10 @@ public class GenericDictIterator extends Pointer { public native @Const @Name("operator ->") GenericDictEntryRef access(); + + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef GenericDictIterator lhs, @Const @ByRef GenericDictIterator rhs); + public boolean equals(GenericDictIterator rhs) { return equals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef GenericDictIterator lhs, @Const @ByRef GenericDictIterator rhs); + public boolean notEquals(GenericDictIterator rhs) { return notEquals(this, rhs); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericElementReference.java new file mode 100644 index 00000000000..93d8bda6401 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericElementReference.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::impl::ListElementReference") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class GenericElementReference extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public GenericElementReference(Pointer p) { super(p); } + + public native @Name("operator std::conditional_t::type>::value,const c10::IValue&,c10::IValue>") @ByVal IValue getGeneric(); + + + + + + // assigning another ref to this assigns the underlying value + + + public native @Const @ByRef IValue get(); + + + + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java new file mode 100644 index 00000000000..737620cf54b --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java @@ -0,0 +1,232 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::List") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class GenericList extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public GenericList(Pointer p) { super(p); } + + + /** + * Constructs an empty list. + */ + + + /** + * Constructs a list with some initial values. + * Example: + * List a({2, 3, 4}); + */ + + + + /** + * Create a generic list with runtime type information. + * This only works for c10::impl::GenericList and is not part of the public API + * but only supposed to be used internally by PyTorch. + */ + public GenericList(@ByVal Type.TypePtr elementType) { super((Pointer)null); allocate(elementType); } + private native void allocate(@ByVal Type.TypePtr elementType); + + public GenericList(@Const @ByRef GenericList arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef GenericList arg0); + public native @ByRef @Name("operator =") GenericList put(@Const @ByRef GenericList arg0); + + /** + * Create a new List pointing to a deep copy of the same data. + * The List returned is a new list with separate storage. + * Changes in it are not reflected in the original list or vice versa. + */ + public native @ByVal GenericList copy(); + + /** + * Returns the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + */ + public native @ByVal IValue get(long pos); + + /** + * Moves out the element at the specified location pos and returns it, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * The list contains an invalid element at position pos afterwards. Any operations + * on it before re-setting it are invalid. + */ + public native @ByVal IValue extract(long pos); + + /** + * Returns a reference to the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. 
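+   *
+   * In these Java bindings, the subscript form shown below corresponds to the
+   * generated get(pos) and set(pos, value) accessors. A minimal sketch,
+   * assuming an existing GenericList named {@code list} (illustrative only,
+   * not part of the generated file):
+   *
+   *   IValue v = list.get(1);   // bounds-checked read
+   *   list.set(2, v);           // bounds-checked write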
+ * + * You cannot store the reference, but you can read it and assign new values to it: + * + * List list = ...; + * list[2] = 5; + * int64_t v = list[1]; + */ + + + + + /** + * Assigns a new value to the element at location pos. + */ + public native void set(long pos, @ByVal IValue value); + + /** + * Assigns a new value to the element at location pos. + */ + + /** + * Returns an iterator to the first element of the container. + * If the container is empty, the returned iterator will be equal to end(). + */ + public native @ByVal @Cast("c10::List::iterator*") GenericListIterator begin(); + + /** + * Returns an iterator to the element following the last element of the container. + * This element acts as a placeholder; attempting to access it results in undefined behavior. + */ + public native @ByVal @Cast("c10::List::iterator*") GenericListIterator end(); + + /** + * Checks if the container has no elements. + */ + public native @Cast("bool") boolean empty(); + + /** + * Returns the number of elements in the container + */ + public native long size(); + + /** + * Increase the capacity of the vector to a value that's greater or equal to new_cap. + */ + public native void reserve(long new_cap); + + /** + * Erases all elements from the container. After this call, size() returns zero. + * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated. + */ + public native void clear(); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") GenericListIterator insert(@ByVal @Cast("c10::List::iterator*") GenericListIterator pos, @Const @ByRef IValue value); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Inserts a new element into the container directly before pos. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void push_back(@Const @ByRef IValue value); + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given list to the end of the container. Uses at most one memory allocation. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void append(@ByVal GenericList lst); + + /** + * Appends the given element value to the end of the container. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Removes the element at pos. + * May invalidate any references, pointers, or iterators referring to contained elements. 
Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") GenericListIterator erase(@ByVal @Cast("c10::List::iterator*") GenericListIterator pos); + + /** + * Removes the elements in the range [first, last). + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") GenericListIterator erase(@ByVal @Cast("c10::List::iterator*") GenericListIterator first, @ByVal @Cast("c10::List::iterator*") GenericListIterator last); + + /** + * Removes the last element of the container. + * Calling pop_back on an empty container is undefined. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void pop_back(); + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional default-inserted elements are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void resize(long count); + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional copies of value are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void resize(long count, @Const @ByRef IValue value); + + /** + * Value equality comparison. This function implements Python-like semantics for + * equality: two lists with the same identity (e.g. same pointer) trivially + * compare equal, otherwise each element is compared for equality. + */ + + + + + /** + * Identity comparison. Returns true if and only if {@code rhs} represents the same + * List object as {@code this}. + */ + public native @Cast("bool") boolean is(@Const @ByRef GenericList rhs); + + public native @ByVal IValueVector vec(); + + /** + * Returns the number of Lists currently pointing to this same list. + * If this is the only instance pointing to this list, returns 1. + */ + // TODO Test use_count + public native @Cast("size_t") long use_count(); + + public native @ByVal Type.TypePtr elementType(); + + // See [unsafe set type] for why this exists. 
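+
+  // A minimal usage sketch of this class from Java (illustrative only, not
+  // part of the generated file; it assumes a Type.TypePtr named elementType
+  // and an IValue named value are already at hand):
+  //
+  //   GenericList list = new GenericList(elementType);
+  //   list.push_back(value);        // append an element
+  //   long n = list.size();         // number of elements
+  //   IValue first = list.get(0);   // bounds-checked access
+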
+ public native void unsafeSetElementType(@ByVal Type.TypePtr t); + private static native @Namespace @Const @Name("c10::impl::ptr_to_first_element") IValue ptr_to_first_element(@Const @ByRef GenericList list); + public IValue ptr_to_first_element() { return ptr_to_first_element(this); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericListIterator.java new file mode 100644 index 00000000000..7d870aa88f7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericListIterator.java @@ -0,0 +1,84 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::impl::ListIterator") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class GenericListIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public GenericListIterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public GenericListIterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public GenericListIterator position(long position) { + return (GenericListIterator)super.position(position); + } + @Override public GenericListIterator getPointer(long i) { + return new GenericListIterator((Pointer)this).offsetAddress(i); + } + + // C++17 friendly std::iterator implementation + + public GenericListIterator() { super((Pointer)null); allocate(); } + private native void allocate(); + + public GenericListIterator(@Const @ByRef GenericListIterator arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef GenericListIterator arg0); + public native @ByRef @Name("operator =") GenericListIterator put(@Const @ByRef GenericListIterator arg0); + + public native @ByRef @Name("operator ++") GenericListIterator increment(); + + public native @ByVal @Name("operator ++") GenericListIterator increment(int arg0); + + public native @ByRef @Name("operator --") GenericListIterator decrement(); + + public native @ByVal @Name("operator --") GenericListIterator decrement(int arg0); + + public native @ByRef @Name("operator +=") GenericListIterator addPut(long offset); + + public native @ByRef @Name("operator -=") GenericListIterator subtractPut(long offset); + + public native @ByVal @Name("operator +") GenericListIterator add(long offset); + + public native @ByVal @Name("operator -") GenericListIterator subtract(long offset); + + private static native @Namespace @Cast("c10::impl::ListIterator::difference_type") @Name("operator -") long subtract(@Const @ByRef GenericListIterator lhs, @Const @ByRef GenericListIterator rhs); + public long subtract(GenericListIterator rhs) { return subtract(this, rhs); } + + + + + + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef 
GenericListIterator lhs, @Const @ByRef GenericListIterator rhs); + public boolean equals(GenericListIterator rhs) { return equals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef GenericListIterator lhs, @Const @ByRef GenericListIterator rhs); + public boolean notEquals(GenericListIterator rhs) { return notEquals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef GenericListIterator lhs, @Const @ByRef GenericListIterator rhs); + public boolean lessThan(GenericListIterator rhs) { return lessThan(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef GenericListIterator lhs, @Const @ByRef GenericListIterator rhs); + public boolean lessThanEquals(GenericListIterator rhs) { return lessThanEquals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef GenericListIterator lhs, @Const @ByRef GenericListIterator rhs); + public boolean greaterThan(GenericListIterator rhs) { return greaterThan(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef GenericListIterator lhs, @Const @ByRef GenericListIterator rhs); + public boolean greaterThanEquals(GenericListIterator rhs) { return greaterThanEquals(this, rhs); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Global.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Global.java index c3311061a52..e413fd45f67 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Global.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Global.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,7 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Global extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public Global(Pointer p) { super(p); } - public Global(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Global(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal IdentList names(); + public static native @ByVal Global create(@Const @ByRef SourceRange range, @Const @ByRef IdentList names); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GradMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GradMode.java index 664ca2aa1c0..719615ce6f8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GradMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GradMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java index 490a637c22d..f2b23283c78 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,9 +34,9 @@ public class Graph extends Pointer { public Graph(@ByVal(nullValue = "torch::jit::ScopePtr(c10::make_intrusive())") @Cast("torch::jit::ScopePtr*") Pointer scope_root) { super((Pointer)null); allocate(scope_root); } - private native void allocate(@ByVal(nullValue = "torch::jit::ScopePtr(c10::make_intrusive())") @Cast("torch::jit::ScopePtr*") Pointer scope_root); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::jit::ScopePtr(c10::make_intrusive())") @Cast("torch::jit::ScopePtr*") Pointer scope_root); public Graph() { super((Pointer)null); allocate(); } - private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal ValueArrayRef inputs(); public native @ByVal ValueArrayRef outputs(); @@ -107,7 +109,7 @@ public native JitNode createDict( @ByVal ValueArrayRef keys, @ByVal ValueArrayRef values); public native JitNode createNumToTensor(Value value); - public native JitNode createObject(@Const @SharedPtr @ByRef ClassType type); + public native JitNode createObject(@Const @SharedPtr("c10::ClassType") @ByRef ClassType type); public native JitNode createSetAttr( Value obj, @StdString BytePointer field, @@ -212,9 +214,10 @@ public native Value insert( public native @Cast("std::ostream*") @ByRef Pointer print( @Cast("std::ostream*") @ByRef Pointer out); - + private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const 
@ByRef Graph g); + public Pointer shiftLeft(Pointer out) { return shiftLeft(out, this); } - public native @SharedPtr @ByVal Graph copy(); + public native @SharedPtr("torch::jit::Graph") @ByVal Graph copy(); public native @UniquePtr Graph copyUnique(); public native void remapTypes(@Const @ByRef TypeMapper type_map); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java index e634906318f..e024b408dc7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -24,9 +26,9 @@ public class GraphAttr extends AttributeValue { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GraphAttr(Pointer p) { super(p); } - public GraphAttr(@ByVal Symbol name, @SharedPtr @ByVal Graph value_) { super((Pointer)null); allocate(name, value_); } - private native void allocate(@ByVal Symbol name, @SharedPtr @ByVal Graph value_); - public native @SharedPtr @ByRef Graph value(); + public GraphAttr(@ByVal Symbol name, @SharedPtr("torch::jit::Graph") @ByVal Graph value_) { super((Pointer)null); allocate(name, value_); } + private native void allocate(@ByVal Symbol name, @SharedPtr("torch::jit::Graph") @ByVal Graph value_); + public native @SharedPtr("torch::jit::Graph") @ByRef Graph value(); public native @UniquePtr @ByVal AttributeValue clone(); public native JitAttributeKind kind(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java index bc3c88cb3c0..a63011aee3b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,29 +34,34 @@ public class GraphExecutor extends Pointer { public GraphExecutor() { super((Pointer)null); allocate(); } private native void allocate(); - public GraphExecutor(@Const @SharedPtr @ByRef Graph graph, @StdString BytePointer function_name) { super((Pointer)null); allocate(graph, function_name); } - private native void allocate(@Const @SharedPtr @ByRef Graph graph, @StdString BytePointer function_name); - public GraphExecutor(@Const @SharedPtr @ByRef Graph graph, @StdString String function_name) { super((Pointer)null); allocate(graph, function_name); } - private native void allocate(@Const @SharedPtr @ByRef Graph graph, @StdString String function_name); + public GraphExecutor(@Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name) { super((Pointer)null); 
allocate(graph, function_name); } + private native void allocate(@Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name); + public GraphExecutor(@Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name) { super((Pointer)null); allocate(graph, function_name); } + private native void allocate(@Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name); public GraphExecutor( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name, ExecutorExecutionMode executor_mode) { super((Pointer)null); allocate(graph, function_name, executor_mode); } private native void allocate( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name, ExecutorExecutionMode executor_mode); public GraphExecutor( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name, @Cast("torch::jit::ExecutorExecutionMode") int executor_mode) { super((Pointer)null); allocate(graph, function_name, executor_mode); } private native void allocate( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name, @Cast("torch::jit::ExecutorExecutionMode") int executor_mode); public native void run(@ByRef IValueVector inputs); + public native @ByVal FuturePtr runAsync( + @ByRef IValueVector stack, + @ByVal(nullValue = "torch::jit::TaskLauncher(at::launch)") @Cast("torch::jit::TaskLauncher*") Pointer taskLauncher); + public native @ByVal FuturePtr runAsync( + @ByRef IValueVector stack); // `remaining_bailout_depth` stands for the maximum number of profiled and // specialized recompilations allowed for the current `GraphExecutor`. 
if diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorImplBase.java index b7f6e8f2e83..c53d6cd732e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorState.java index e0d1b032733..0f61b45ab37 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutorState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java index cb53f8fae95..c0799b019af 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -25,21 +27,21 @@ public class GraphFunction extends Function { // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) public GraphFunction( @ByVal QualifiedName name, - @SharedPtr @ByVal Graph graph, + @SharedPtr("torch::jit::Graph") @ByVal Graph graph, @ByVal GraphFunctionCreator function_creator, @ByVal(nullValue = "c10::optional(c10::nullopt)") ExecutorExecutionModeOptional executor_execution_mode) { super((Pointer)null); allocate(name, graph, function_creator, executor_execution_mode); } private native void allocate( @ByVal QualifiedName name, - @SharedPtr @ByVal Graph graph, + @SharedPtr("torch::jit::Graph") @ByVal Graph graph, @ByVal GraphFunctionCreator function_creator, @ByVal(nullValue = "c10::optional(c10::nullopt)") ExecutorExecutionModeOptional executor_execution_mode); public GraphFunction( @ByVal QualifiedName name, - @SharedPtr @ByVal Graph graph, + @SharedPtr("torch::jit::Graph") @ByVal Graph graph, @ByVal GraphFunctionCreator function_creator) { super((Pointer)null); allocate(name, graph, function_creator); } private native void allocate( @ByVal QualifiedName name, - @SharedPtr @ByVal Graph graph, + 
@SharedPtr("torch::jit::Graph") @ByVal Graph graph, @ByVal GraphFunctionCreator function_creator); public native @Cast("bool") boolean isGraphFunction(); @@ -48,21 +50,26 @@ private native void allocate( - public native @SharedPtr @ByVal Graph graph(); + public native @ByVal FuturePtr runAsync( + @ByRef IValueVector stack, + @ByVal(nullValue = "torch::jit::TaskLauncher(at::launch)") @Cast("torch::jit::TaskLauncher*") Pointer taskLauncher); + public native @ByVal FuturePtr runAsync( + @ByRef IValueVector stack); - public native @SharedPtr @ByVal Graph optimized_graph(); + public native @SharedPtr("torch::jit::Graph") @ByVal Graph graph(); + + public native @SharedPtr("torch::jit::Graph") @ByVal Graph optimized_graph(); public native @Const @ByRef QualifiedName qualname(); // private/unstable api. sets the initial execution mode // will not affect executor if there is an existing executor // created for this function - public native void _set_initial_executor_execution_mode(ExecutorExecutionMode mode); - public native void _set_initial_executor_execution_mode(@Cast("torch::jit::ExecutorExecutionMode") int mode); + // private/unstable api. sets flag of whether or not to ignore amp. // will not affect executor if there is an existing executor // created for this function - public native void _set_ignore_amp(@Cast("bool") boolean ignore_amp); + // if this isn't yet defined, run its method_creator function public native void ensure_defined(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphOptimizerEnabledGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphOptimizerEnabledGuard.java index e9d2fe8f7ae..282c6d0e958 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphOptimizerEnabledGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphOptimizerEnabledGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphVector.java index 8a3ac708bac..5db8a7c1bcd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,10 +35,12 @@ public class GraphVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @SharedPtr Graph get(@Cast("size_t") long i); + public Graph front() { return get(0); } + public Graph back() { return get(size() - 1); } + @Index(function = "at") public native @SharedPtr("torch::jit::Graph") Graph get(@Cast("size_t") long i); public native GraphVector put(@Cast("size_t") long 
i, Graph value); - public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr Graph value); + public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("torch::jit::Graph") Graph value); public native @ByVal Iterator erase(@ByVal Iterator pos); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @@ -46,7 +50,7 @@ public Iterator() { } public native @Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @SharedPtr @Const Graph get(); + public native @Name("operator *") @SharedPtr("torch::jit::Graph") @Const Graph get(); } public Graph[] get() { diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphsAttr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphsAttr.java index ee71f69fe64..3c0e8058f68 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphsAttr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphsAttr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleFuncOptions.java index 3a4388042b7..686e117d789 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -42,7 +44,7 @@ public class GridSampleFuncOptions extends Pointer { return new GridSampleFuncOptions((Pointer)this).offsetAddress(i); } - public native @ByRef @NoException(true) grid_sample_mode_t mode(); - public native @ByRef @NoException(true) grid_sample_padding_mode_t padding_mode(); + public native @ByRef @NoException(true) GridSampleMode mode(); + public native @ByRef @NoException(true) GridSamplePaddingMode padding_mode(); public native @ByRef @NoException(true) BoolOptional align_corners(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/grid_sample_mode_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleMode.java similarity index 56% rename from pytorch/src/gen/java/org/bytedeco/pytorch/grid_sample_mode_t.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleMode.java index 15bb9fc1103..39ed5cf6fe1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/grid_sample_mode_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSampleMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,21 +18,21 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class grid_sample_mode_t extends Pointer { +public class GridSampleMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public grid_sample_mode_t(Pointer p) { super(p); } - public grid_sample_mode_t(kBilinear value) { this(); put(value); } - public grid_sample_mode_t(kNearest value) { this(); put(value); } - public grid_sample_mode_t() { allocate(); } + public GridSampleMode(Pointer p) { super(p); } + public GridSampleMode(kBilinear value) { this(); put(value); } + public GridSampleMode(kNearest value) { this(); put(value); } + public GridSampleMode() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef grid_sample_mode_t put(@ByRef grid_sample_mode_t x); + public native @Name("operator =") @ByRef GridSampleMode put(@ByRef GridSampleMode x); public @ByRef kBilinear get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kBilinear get0(@ByRef grid_sample_mode_t container); - @ValueSetter public native grid_sample_mode_t put(@ByRef kBilinear value); + @Namespace @Name("c10::get<0>") public static native @ByRef kBilinear get0(@ByRef GridSampleMode container); + @ValueSetter public native GridSampleMode put(@ByRef kBilinear value); public @ByRef kNearest get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kNearest get1(@ByRef grid_sample_mode_t container); - @ValueSetter public native grid_sample_mode_t put(@ByRef kNearest value); + @Namespace @Name("c10::get<1>") public static native @ByRef kNearest get1(@ByRef GridSampleMode container); + @ValueSetter public native GridSampleMode put(@ByRef kNearest value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/grid_sample_padding_mode_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSamplePaddingMode.java similarity index 52% rename from pytorch/src/gen/java/org/bytedeco/pytorch/grid_sample_padding_mode_t.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/GridSamplePaddingMode.java index c88d805bb6a..b1bd2fe9eef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/grid_sample_padding_mode_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GridSamplePaddingMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,25 +18,25 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class grid_sample_padding_mode_t extends Pointer { +public class GridSamplePaddingMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public grid_sample_padding_mode_t(Pointer p) { super(p); } - public grid_sample_padding_mode_t(kZeros value) { this(); put(value); } - public grid_sample_padding_mode_t(kBorder value) { this(); put(value); } - public grid_sample_padding_mode_t(kReflection value) { this(); put(value); } - public grid_sample_padding_mode_t() { allocate(); } + public GridSamplePaddingMode(Pointer p) { super(p); } + public GridSamplePaddingMode(kZeros value) { this(); put(value); } + public GridSamplePaddingMode(kBorder value) { this(); put(value); } + public GridSamplePaddingMode(kReflection value) { this(); put(value); } + public GridSamplePaddingMode() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef grid_sample_padding_mode_t put(@ByRef grid_sample_padding_mode_t x); + public native @Name("operator =") @ByRef GridSamplePaddingMode put(@ByRef GridSamplePaddingMode x); public @ByRef kZeros get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kZeros get0(@ByRef grid_sample_padding_mode_t container); - @ValueSetter public native grid_sample_padding_mode_t put(@ByRef kZeros value); + @Namespace @Name("c10::get<0>") public static native @ByRef kZeros get0(@ByRef GridSamplePaddingMode container); + @ValueSetter public native GridSamplePaddingMode put(@ByRef kZeros value); public @ByRef kBorder get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kBorder get1(@ByRef grid_sample_padding_mode_t container); - @ValueSetter public native grid_sample_padding_mode_t put(@ByRef kBorder value); + @Namespace @Name("c10::get<1>") public static native @ByRef kBorder get1(@ByRef GridSamplePaddingMode container); + @ValueSetter public native GridSamplePaddingMode put(@ByRef kBorder value); public @ByRef kReflection get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kReflection get2(@ByRef grid_sample_padding_mode_t container); - @ValueSetter public native grid_sample_padding_mode_t put(@ByRef kReflection value); + @Namespace @Name("c10::get<2>") public static native @ByRef kReflection get2(@ByRef GridSamplePaddingMode container); + @ValueSetter public native GridSamplePaddingMode put(@ByRef kReflection value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNorm.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNorm.java deleted file mode 100644 index b307d403ff4..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNorm.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code GroupNormImpl}. - * See the documentation for {@code GroupNormImpl} class to learn what methods it - * provides, and examples of how to use {@code GroupNorm} with - * {@code torch::nn::GroupNormOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GroupNorm extends GroupNormImplModuleHolder { - static { Loader.load(); } - - public GroupNorm(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public GroupNorm(@SharedPtr @Cast({"", "std::shared_ptr"}) GroupNormImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GroupNormImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public GroupNorm(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormFuncOptions.java index 5caee2ddee0..7c6557d9259 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java index 359cb816a95..1bc8dbefd25 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,9 +39,9 @@ public class GroupNormImpl extends GroupNormImplCloneable { public GroupNormImpl(Pointer p) { super(p); } public GroupNormImpl(@Cast("int64_t") long num_groups, @Cast("int64_t") long num_channels) { super((Pointer)null); allocate(num_groups, num_channels); } - @NoDeallocator private native void allocate(@Cast("int64_t") long num_groups, @Cast("int64_t") long num_channels); + @SharedPtr private native void allocate(@Cast("int64_t") long num_groups, @Cast("int64_t") long num_channels); public GroupNormImpl(@Const @ByRef GroupNormOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef GroupNormOptions options_); + @SharedPtr private native void allocate(@Const @ByRef GroupNormOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java index be484baa290..529245fffc8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS 
FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class GroupNormImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GroupNormImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr GroupNormImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(GroupNormImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplModuleHolder.java deleted file mode 100644 index 45f80b153d8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class GroupNormImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public GroupNormImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. 
*/ - /* implicit */ public GroupNormImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public GroupNormImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) GroupNormImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) GroupNormImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") GroupNormImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") GroupNormImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) GroupNormImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native GroupNormImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormOptions.java index 4a6f6709e1d..38c4c1dbf1a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GumbelSoftmaxFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GumbelSoftmaxFuncOptions.java index 10afca0c8ab..132ce218723 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GumbelSoftmaxFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GumbelSoftmaxFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksArgs.java new file mode 100644 index 00000000000..f56c2053545 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksArgs.java @@ -0,0 +1,29 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// NB: dummy argument to suppress "ISO C++11 requires at least one argument +// for the "..." in a variadic macro" +@Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class HIPHooksArgs extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public HIPHooksArgs() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public HIPHooksArgs(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java new file mode 100644 index 00000000000..5ef668d82c7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java @@ -0,0 +1,60 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// The HIPHooksInterface is an omnibus interface for any HIP functionality +// which we may want to call into from CPU code (and thus must be dynamically +// dispatched, to allow for separate compilation of HIP code). See +// CUDAHooksInterface for more detailed motivation. +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class HIPHooksInterface extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public HIPHooksInterface() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public HIPHooksInterface(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public HIPHooksInterface(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public HIPHooksInterface position(long position) { + return (HIPHooksInterface)super.position(position); + } + @Override public HIPHooksInterface getPointer(long i) { + return new HIPHooksInterface((Pointer)this).offsetAddress(i); + } + + // This should never actually be implemented, but it is used to + // squelch -Werror=non-virtual-dtor + + // Initialize the HIP library state + public native void initHIP(); + + public native @UniquePtr GeneratorImpl initHIPGenerator(Context arg0); + + public native @Cast("bool") boolean hasHIP(); + + public native @Cast("int64_t") long current_device(); + + public native Allocator getPinnedMemoryAllocator(); + + public native void registerHIPTypes(Context arg0); + + public native int getNumGPUs(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java index 0a3bede75b6..2d9662d9d4a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HalfArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HalfArrayRef.java index 
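The new HIPHooksInterface mirrors CUDAHooksInterface: the base class is a stub whose queries report that HIP is absent unless a HIP build registers real hooks. A quick probe, using only the default constructor and the methods declared above:

    HIPHooksInterface hooks = new HIPHooksInterface();
    if (!hooks.hasHIP()) {
        System.out.println("No HIP runtime registered; ROCm-specific paths are skipped.");
    }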
2eea3d4b98d..3d0b3c101ae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HalfArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HalfArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class HalfArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public HalfArrayRef(@Const @ByRef Half OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef Half OneElt); + /** Construct an ArrayRef from a pointer and length. */ public HalfArrayRef(@Const Half data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -70,13 +71,13 @@ public class HalfArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::t)>::iterator*") ShortPointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::t)>::iterator*") ShortPointer end(); + public native @Const @ByPtr Half begin(); + public native @Const @ByPtr Half end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::t)>::const_iterator*") ShortPointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::t)>::const_iterator*") ShortPointer cend(); + public native @Const @ByPtr Half cbegin(); + public native @Const @ByPtr Half cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HalfComplex.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HalfComplex.java new file mode 100644 index 00000000000..b6b36add496 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HalfComplex.java @@ -0,0 +1,61 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// TODO : move to complex.h +@Name("c10::complex") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class HalfComplex extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public HalfComplex(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public HalfComplex(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public HalfComplex position(long position) { + return (HalfComplex)super.position(position); + } + @Override public HalfComplex getPointer(long i) { + return new HalfComplex((Pointer)this).offsetAddress(i); + } + + public native @ByRef Half real_(); public native HalfComplex real_(Half setter); + public native @ByRef Half imag_(); public native HalfComplex imag_(Half setter); + + // Constructors + public HalfComplex() { super((Pointer)null); allocate(); } + private native void allocate(); + // Half constructor is not constexpr so the following constructor can't + // be constexpr + public HalfComplex(@Const @ByRef Half real, @Const @ByRef Half imag) { super((Pointer)null); allocate(real, imag); } + private native void allocate(@Const @ByRef Half real, @Const @ByRef Half imag); + public HalfComplex(@Const @ByRef FloatComplex value) { super((Pointer)null); allocate(value); } + private native void allocate(@Const @ByRef FloatComplex value); + + // Conversion operator + public native @ByVal @Name("operator c10::complex") FloatComplex asFloatComplex(); + + public native @Const @ByVal @org.bytedeco.javacpp.annotation.Function Half real(); + public native @Const @ByVal @org.bytedeco.javacpp.annotation.Function Half imag(); + + public native @ByRef @Name("operator +=") HalfComplex addPut(@Const @ByRef HalfComplex other); + + public native @ByRef @Name("operator -=") HalfComplex subtractPut(@Const @ByRef HalfComplex other); + + public native @ByRef @Name("operator *=") HalfComplex multiplyPut(@Const @ByRef HalfComplex other); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Hardshrink.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Hardshrink.java deleted file mode 100644 index ac6a0dcdbfc..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Hardshrink.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code HardshrinkImpl}. - * See the documentation for {@code HardshrinkImpl} class to learn what methods it - * provides, and examples of how to use {@code Hardshrink} with - * {@code torch::nn::HardshrinkOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Hardshrink extends HardshrinkImplModuleHolder { - static { Loader.load(); } - - public Hardshrink(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Hardshrink(@SharedPtr @Cast({"", "std::shared_ptr"}) HardshrinkImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HardshrinkImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
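A short sketch of the new HalfComplex binding, restricted to the members shown above; the Half(float) converting constructor is an assumption carried over from the existing Half class rather than something visible in this diff:

    Half re = new Half(1.5f);                 // assumed Half(float) constructor
    Half im = new Half(-0.5f);
    HalfComplex z = new HalfComplex(re, im);  // (1.5 - 0.5i) in half precision
    z.addPut(z);                              // operator +=  ->  (3.0 - 1.0i)
    FloatComplex wide = z.asFloatComplex();   // widening conversion operator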
*/ - public Hardshrink(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java index 8945aa9866d..d721a4481ac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class HardshrinkImpl extends HardshrinkImplCloneable { } public HardshrinkImpl(@Const @ByRef(nullValue = "torch::nn::HardshrinkOptions{}") HardshrinkOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardshrinkOptions{}") HardshrinkOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardshrinkOptions{}") HardshrinkOptions options_); public HardshrinkImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java index e41dea56a25..3bbda28cfc1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class HardshrinkImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HardshrinkImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HardshrinkImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(HardshrinkImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
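HardshrinkImpl follows the same pattern as GroupNormImpl: the holder is gone and the @SharedPtr-allocated Impl is constructed directly, here through the no-argument constructor shown in the hunk above. Continuing under the earlier imports:

    HardshrinkImpl hs = new HardshrinkImpl();   // torch::nn::HardshrinkOptions{} defaults, lambda = 0.5
    Tensor out = hs.forward(randn(4));          // elementwise hard shrinkage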
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplModuleHolder.java deleted file mode 100644 index cd4827fadf2..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class HardshrinkImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public HardshrinkImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public HardshrinkImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public HardshrinkImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) HardshrinkImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HardshrinkImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") HardshrinkImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") HardshrinkImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) HardshrinkImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native HardshrinkImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkOptions.java index ed09d55fbba..e16f95d27a6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Hardtanh.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Hardtanh.java deleted file mode 100644 index 47f518041ba..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Hardtanh.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code HardtanhImpl}. - * See the documentation for {@code HardtanhImpl} class to learn what methods it - * provides, and examples of how to use {@code Hardtanh} with - * {@code torch::nn::HardtanhOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Hardtanh extends HardtanhImplModuleHolder { - static { Loader.load(); } - - public Hardtanh(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Hardtanh(@SharedPtr @Cast({"", "std::shared_ptr"}) HardtanhImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HardtanhImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Hardtanh(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java index e9ef4c00010..76bc777bdbf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,9 +48,9 @@ public class HardtanhImpl extends HardtanhImplCloneable { } public HardtanhImpl(@Const @ByRef(nullValue = "torch::nn::HardtanhOptions{}") HardtanhOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardtanhOptions{}") HardtanhOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::HardtanhOptions{}") HardtanhOptions options_); public HardtanhImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java index f0bb3223576..bdb15e3647f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class HardtanhImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HardtanhImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HardtanhImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(HardtanhImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplModuleHolder.java deleted file mode 100644 index 1dbd9417e15..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class HardtanhImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public HardtanhImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public HardtanhImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public HardtanhImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) HardtanhImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HardtanhImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") HardtanhImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") HardtanhImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) HardtanhImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native HardtanhImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhOptions.java index f7500428f5e..5c48b9887b4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValueMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValueMap.java index 24452bc12dd..0f950d7420a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValueMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValueMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValues.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValues.java index 30e90637219..8a58287fc19 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValues.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HashAliasedIValues.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,7 @@ public class HashAliasedIValues extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public IValue front() { try (Iterator it = begin()) { return it.get(); } } public native void insert(@ByRef IValue value); public native void erase(@ByRef IValue value); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HashType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HashType.java deleted file mode 100644 index e792b55541e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HashType.java +++ /dev/null @@ -1,39 +0,0 @@ -// Targeted by JavaCPP 
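The added front() helper on HashAliasedIValues opens the native iterator in a try-with-resources block so it is released as soon as the first element is read. Usage sketch, assuming the no-argument constructor JavaCPP generates for this set type and the IValue(long) constructor:

    HashAliasedIValues seen = new HashAliasedIValues(); // assumed default constructor
    seen.insert(new IValue(42));                        // assumed IValue(long) constructor
    IValue first = seen.front();                        // new convenience accessor
    System.out.println(seen.size());                    // 1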
version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class HashType extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public HashType() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public HashType(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public HashType(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public HashType position(long position) { - return (HashType)super.position(position); - } - @Override public HashType getPointer(long i) { - return new HashType((Pointer)this).offsetAddress(i); - } - - - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HermeticPyObjectTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HermeticPyObjectTLS.java new file mode 100644 index 00000000000..f96441034eb --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HermeticPyObjectTLS.java @@ -0,0 +1,48 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// This TLS controls whether or not we permanently associate PyObject +// with Tensor the first time it is allocated. When hermetic PyObject +// TLS is enabled (state is true), we DO NOT save PyObjects to Tensor, +// meaning you get a distinct PyObject whenever you execute the code in +// question. +@Namespace("c10::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class HermeticPyObjectTLS extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public HermeticPyObjectTLS() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public HermeticPyObjectTLS(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public HermeticPyObjectTLS(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public HermeticPyObjectTLS position(long position) { + return (HermeticPyObjectTLS)super.position(position); + } + @Override public HermeticPyObjectTLS getPointer(long i) { + return new HermeticPyObjectTLS((Pointer)this).offsetAddress(i); + } + + public static native void set_state(@Cast("bool") boolean state); + public static native @Cast("bool") boolean get_state(); + // Call this from the multipy/torchdeploy top level + public static native void init_state(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLoss.java deleted file mode 100644 index 48f46c198a7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code HingeEmbeddingLossImpl}. - * See the documentation for {@code HingeEmbeddingLossImpl} class to learn what - * methods it provides, and examples of how to use {@code HingeEmbeddingLoss} with - * {@code torch::nn::HingeEmbeddingLossOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class HingeEmbeddingLoss extends HingeEmbeddingLossImplModuleHolder { - static { Loader.load(); } - - public HingeEmbeddingLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public HingeEmbeddingLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
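set_state(), get_state(), and init_state() are the entire surface of HermeticPyObjectTLS, so the natural Java usage is a save/set/restore pattern around the region that should get fresh PyObjects:

    boolean saved = HermeticPyObjectTLS.get_state();
    HermeticPyObjectTLS.set_state(true);  // tensors allocated here do not cache a PyObject
    try {
        // ... interop work ...
    } finally {
        HermeticPyObjectTLS.set_state(saved);
    }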
*/ - public HingeEmbeddingLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java index 68f1c8ae9e1..0255c887079 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -48,9 +50,9 @@ public class HingeEmbeddingLossImpl extends HingeEmbeddingLossImplCloneable { } public HingeEmbeddingLossImpl(@ByVal(nullValue = "torch::nn::HingeEmbeddingLossOptions{}") HingeEmbeddingLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::HingeEmbeddingLossOptions{}") HingeEmbeddingLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::HingeEmbeddingLossOptions{}") HingeEmbeddingLossOptions options_); public HingeEmbeddingLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java index cd531adef89..a6aa3a93d60 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class HingeEmbeddingLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public HingeEmbeddingLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HingeEmbeddingLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(HingeEmbeddingLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplModuleHolder.java deleted file mode 100644 index 41bd2a85480..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class HingeEmbeddingLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public HingeEmbeddingLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public HingeEmbeddingLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public HingeEmbeddingLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") HingeEmbeddingLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") HingeEmbeddingLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native HingeEmbeddingLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossOptions.java index 07201b20856..c7c0dcd65e8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,5 +46,5 @@ public class HingeEmbeddingLossOptions extends Pointer { } public native @ByRef @NoException(true) DoublePointer margin(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLoss.java deleted file mode 100644 index 358a49a8915..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code HuberLossImpl}. - * See the documentation for {@code HuberLossImpl} class to learn what methods it - * provides, and examples of how to use {@code HuberLoss} with - * {@code torch::nn::HuberLossOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. 
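reduction() now returns the LossReduction variant class instead of the old loss_reduction_t name, matching the same renaming applied to the other loss options in this patch. Reading the libtorch defaults from a freshly constructed options object; the no-argument constructor is assumed here, since this hunk only shows the accessors:

    HingeEmbeddingLossOptions opts = new HingeEmbeddingLossOptions(); // assumed no-arg constructor
    LossReduction red = opts.reduction();     // defaults to kMean in libtorch
    DoublePointer margin = opts.margin();     // defaults to 1.0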
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class HuberLoss extends HuberLossImplModuleHolder { - static { Loader.load(); } - - public HuberLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public HuberLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) HuberLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HuberLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public HuberLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java index 97b199616b9..059af1815d3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -47,9 +49,9 @@ public class HuberLossImpl extends HuberLossImplCloneable { } public HuberLossImpl(@ByVal(nullValue = "torch::nn::HuberLossOptions{}") HuberLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::HuberLossOptions{}") HuberLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::HuberLossOptions{}") HuberLossOptions options_); public HuberLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java index e0cba75430d..adf692cb26e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class HuberLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public HuberLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr HuberLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(HuberLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplModuleHolder.java deleted file mode 100644 index 9c38fd2bc5f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class HuberLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public HuberLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public HuberLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public HuberLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) HuberLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) HuberLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") HuberLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") HuberLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) HuberLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native HuberLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossOptions.java index 194ccd64a45..fb2229920f8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -51,6 +53,6 @@ public class HuberLossOptions extends Pointer { public HuberLossOptions(@ByVal kSum reduction) { super((Pointer)null); allocate(reduction); } private native void allocate(@ByVal kSum reduction); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); public native @ByRef @NoException(true) DoublePointer delta(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IMethod.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IMethod.java index 628e86204c7..1494367c813 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IMethod.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IMethod.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IRAttributeError.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/IRAttributeError.java deleted file mode 100644 index 0a2fd6e2dfa..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IRAttributeError.java +++ /dev/null @@ -1,28 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class IRAttributeError extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public IRAttributeError(Pointer p) { super(p); } - - public IRAttributeError(@ByVal Symbol name, @Cast("bool") boolean defined) { super((Pointer)null); allocate(name, defined); } - private native void allocate(@ByVal Symbol name, @Cast("bool") boolean defined); - public native @NoException(true) @Cast("const char*") BytePointer what(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java new file mode 100644 index 00000000000..0d8b14547f7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java @@ -0,0 +1,34 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// this is a reader implemented by std::istream +@Namespace("caffe2::serialize") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class IStreamAdapter extends ReadAdapterInterface { + static { Loader.load(); } + + + + public IStreamAdapter(@Cast("std::istream*") Pointer istream) { super((Pointer)null); allocate(istream); } + private native void allocate(@Cast("std::istream*") Pointer istream); + public native @Cast("size_t") long size(); + public native @Cast("size_t") long read(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long n, @Cast("const char*") BytePointer what/*=""*/); + public native @Cast("size_t") long read(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long n); + public native @Cast("size_t") long read(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long n, String what/*=""*/); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java index 7901584565c..923609a90b9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -84,13 +86,14 @@ public class IValue extends Pointer { * for efficiency. * TODO: need to support customizing equality */ - public native @ByVal IValue equals(@Const @ByRef IValue rhs); /** * This implements the same semantics as {@code bool(lhs == rhs)} in Python. which * is the same as {@code equals()} except for Tensor types. */ - - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef IValue lhs, @Const @ByRef IValue rhs); + public boolean equals(IValue rhs) { return equals(this, rhs); } + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef IValue lhs, @Const @ByRef IValue rhs); + public boolean notEquals(IValue rhs) { return notEquals(this, rhs); } /** * Identity comparison. Checks if {@code this} is the same object as {@code rhs}. The @@ -129,7 +132,10 @@ public class IValue extends Pointer { * compare identity) [tensor1] == [tensor1_copy] -> RuntimeError: * Boolean value of Tensor with more than one value is ambiguous */ - + private static native @Namespace @Cast("bool") boolean _fastEqualsForContainer( + @Const @ByRef IValue lhs, + @Const @ByRef IValue rhs); + public boolean _fastEqualsForContainer(IValue rhs) { return _fastEqualsForContainer(this, rhs); } public native @Cast("bool") boolean isAliasOf(@Const @ByRef IValue rhs); /** \private [doxygen private] */ @@ -157,6 +163,8 @@ public class IValue extends Pointer { public native @ByRef IValue toIValue(); /** \private [doxygen private] */ + public IValue(@ByVal @Cast("c10::intrusive_ptr*") Pointer blob) { super((Pointer)null); allocate(blob); } + private native void allocate(@ByVal @Cast("c10::intrusive_ptr*") Pointer blob); /** \private [doxygen private] */ public native @Cast("bool") boolean isBlob(); @@ -165,19 +173,26 @@ public class IValue extends Pointer { /** \private [doxygen private] */ + public native @ByVal @Cast("c10::intrusive_ptr*") Pointer toBlob(); // Capsule. No new callsites of these APIs should // be introduced. 
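For illustration, a minimal sketch of how the equality bindings introduced earlier in this hunk might be exercised from Java once this patch is applied; it uses only the int64_t constructor and the equals/notEquals/_fastEqualsForContainer methods shown above, and is not itself part of the patch:

    import org.bytedeco.pytorch.IValue;

    public class IValueEqualitySketch {
        public static void main(String[] args) {
            // The new operator== / operator!= bindings give IValue the same
            // semantics as Python's bool(lhs == rhs), surfaced as instance methods.
            IValue a = new IValue(42L);   // int64_t constructor from this hunk
            IValue b = new IValue(42L);
            boolean same = a.equals(b);          // wraps c10::operator==(lhs, rhs)
            boolean different = a.notEquals(b);  // wraps c10::operator!=(lhs, rhs)
            // Per the comment above, _fastEqualsForContainer compares tensors
            // by identity rather than element-wise equality.
            boolean fast = a._fastEqualsForContainer(b);
            System.out.println(same + " " + different + " " + fast);
        }
    }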
+ public static native @ByVal IValue make_capsule( + @ByVal @Cast("c10::intrusive_ptr*") Pointer blob); public native @Cast("bool") boolean isCapsule(); + public native @ByVal @Cast("c10::intrusive_ptr*") Pointer toCapsule(); // Custom C++ classes public native @Cast("bool") boolean isCustomClass(); // Tuple + public IValue(@ByVal TuplePtr v) { super((Pointer)null); allocate(v); } + private native void allocate(@ByVal TuplePtr v); public native @Cast("bool") boolean isTuple(); + public native @ByVal TuplePtr toTuple(); public native @ByRef Tuple toTupleRef(); // Double @@ -188,20 +203,34 @@ public class IValue extends Pointer { // ComplexDouble public native @Cast("bool") boolean isComplexDouble(); + public native @ByVal DoubleComplex toComplexDouble(); // Future + public IValue(@ByVal FuturePtr v) { super((Pointer)null); allocate(v); } + private native void allocate(@ByVal FuturePtr v); public native @Cast("bool") boolean isFuture(); + public native @ByVal FuturePtr toFuture(); + + public IValue(@ByVal AwaitPtr v) { super((Pointer)null); allocate(v); } + private native void allocate(@ByVal AwaitPtr v); public native @Cast("bool") boolean isAwait(); + public native @ByVal AwaitPtr toAwait(); // RRef + public IValue(@ByVal RRefInterfacePtr v) { super((Pointer)null); allocate(v); } + private native void allocate(@ByVal RRefInterfacePtr v); public native @Cast("bool") boolean isRRef(); + public native @ByVal RRefInterfacePtr toRRef(); // Quantizer + public IValue(@ByVal QuantizerPtr v) { super((Pointer)null); allocate(v); } + private native void allocate(@ByVal QuantizerPtr v); public native @Cast("bool") boolean isQuantizer(); + public native @ByVal QuantizerPtr toQuantizer(); // Int public IValue(@Cast("int64_t") long i) { super((Pointer)null); allocate(i); } @@ -240,18 +269,20 @@ public class IValue extends Pointer { // IntList public native @Cast("bool") boolean isIntList(); + public native @ByVal LongList toIntList(); public native @ByVal @Cast("std::vector*") LongVector toIntVector(); public native @ByVal DimVector toDimVector(); // ConstantString + public IValue(@ByVal ConstantStringPtr v) { super((Pointer)null); allocate(v); } + private native void allocate(@ByVal ConstantStringPtr v); public IValue(@StdString BytePointer v) { super((Pointer)null); allocate(v); } private native void allocate(@StdString BytePointer v); public IValue(@StdString String v) { super((Pointer)null); allocate(v); } private native void allocate(@StdString String v); - public IValue(@ByVal @Cast("c10::string_view*") Pointer v) { super((Pointer)null); allocate(v); } - private native void allocate(@ByVal @Cast("c10::string_view*") Pointer v); public native @Cast("bool") boolean isString(); + public native @ByVal @Name("toString") ConstantStringPtr toConstantString(); public native @StdString BytePointer toStringRef(); public native @ByVal @Cast("c10::optional >*") Pointer toOptionalStringRef(); public native @ByVal @Cast("c10::string_view*") Pointer toStringView(); @@ -259,30 +290,39 @@ public class IValue extends Pointer { // DoubleList public native @Cast("bool") boolean isDoubleList(); + public native @ByVal DoubleList toDoubleList(); public native @ByVal @Cast("std::vector*") DoubleVector toDoubleVector(); // ComplexDoubleList public native @Cast("bool") boolean isComplexDoubleList(); + public native @ByVal DoubleComplexList toComplexDoubleList(); + public native @StdVector DoubleComplex toComplexDoubleVector(); // BoolList public native @Cast("bool") boolean isBoolList(); + public native @ByVal BooleanList 
toBoolList(); // TensorList public native @Cast("bool") boolean isTensorList(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector toTensorVector(); + public native @ByVal TensorList toTensorList(); + public native @Cast({"", "std::vector"}) @StdMove TensorVector toTensorVector(); // OptionalTensorList public native @Cast("bool") boolean isOptionalTensorList(); + public native @ByVal TensorOptionalList toOptionalTensorList(); public native @StdVector TensorOptional toOptionalTensorVector(); // GenericList + public IValue(@ByVal GenericList v) { super((Pointer)null); allocate(v); } + private native void allocate(@ByVal GenericList v); public native @Cast("bool") boolean isList(); - public native @ByVal @Cast("c10::ArrayRef*") IValueArrayRef toListRef(); + public native @ByVal GenericList toList(); + public native @ByVal IValueArrayRef toListRef(); // Some template constructors of IValue calls another constructor recursively. // This SFINAEs the called constructor exists. @@ -311,19 +351,24 @@ public class IValue extends Pointer { public native @Cast("bool") boolean isObject(); public native @ByVal @Cast("c10::intrusive_ptr*") Pointer toObject(); - public native @ByRef Object toObjectRef(); public native @Cast("bool") boolean isModule(); // PyObject + public IValue(@ByVal PyObjectHolderPtr v) { super((Pointer)null); allocate(v); } + private native void allocate(@ByVal PyObjectHolderPtr v); public native @Cast("bool") boolean isPyObject(); + public native @ByVal PyObjectHolderPtr toPyObjectHolder(); public native @Cast("PyObject*") Pointer toPyObject(); // Enum + public IValue(@ByVal EnumHolderPtr v) { super((Pointer)null); allocate(v); } + private native void allocate(@ByVal EnumHolderPtr v); public native @Cast("bool") boolean isEnum(); + public native @ByVal EnumHolderPtr toEnumHolder(); // None public IValue() { super((Pointer)null); allocate(); } @@ -422,16 +467,17 @@ public class IValue extends Pointer { // the serializer's constant table). // // repr() is not necessarily defined on all objects! - public native @Cast("std::ostream*") @ByRef Pointer repr( - @Cast("std::ostream*") @ByRef Pointer stream, - @ByVal CustomFormatter customFormatter); + // Computes an "informal" string representation of an IValue. This should be // used for debugging, or servicing `print()`-like functions. // This is different from `repr()` in that there is no expectation that we can // exactly reconstruct an IValue from the output; feel free to use a // concise/pretty form - + private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer out, + @Const @ByRef IValue v); + public Pointer shiftLeft(Pointer out) { return shiftLeft(out, this); } public native @Cast("bool") boolean isPtrType(); @@ -439,46 +485,6 @@ public class IValue extends Pointer { public native @Const Pointer internalToPointer(); // Detect aliased tensors. - public static class HashAliasedIValue extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public HashAliasedIValue() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public HashAliasedIValue(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public HashAliasedIValue(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public HashAliasedIValue position(long position) { - return (HashAliasedIValue)super.position(position); - } - @Override public HashAliasedIValue getPointer(long i) { - return new HashAliasedIValue((Pointer)this).offsetAddress(i); - } - - public native @Cast("size_t") long hashTensor(@Const @ByRef Tensor ten); - public native @Cast("size_t") @Name("operator ()") long apply(@Const @ByRef IValue val); - } - - public static class CompAliasedIValues extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public CompAliasedIValues() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public CompAliasedIValues(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CompAliasedIValues(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public CompAliasedIValues position(long position) { - return (CompAliasedIValues)super.position(position); - } - @Override public CompAliasedIValues getPointer(long i) { - return new CompAliasedIValues((Pointer)this).offsetAddress(i); - } - - public native @Cast("bool") @Name("operator ()") boolean apply(@Const @ByRef IValue lhs, @Const @ByRef IValue rhs); - } // Chechs if this and rhs has a subvalues in common. // [t1,t2] and [t2, t3] returns true. @@ -490,7 +496,7 @@ public static class CompAliasedIValues extends Pointer { // Apply visitor to every subvalue. // TODO: There are several places that recurse over IValue. This is fragile. // This visitor should be used to recurse over ivalues. - public native void visit(@Const @ByRef IValueVisitor visitor); + public native @ByVal IValue deepcopy(); public native @ByVal IValue deepcopy(@ByRef HashAliasedIValueMap memo); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueArrayRef.java index 585e176edbd..74bce3536a7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class IValueArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public IValueArrayRef(@Const @ByRef IValue OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef IValue OneElt); + /** Construct an ArrayRef from a pointer and length. */ public IValueArrayRef(@Const IValue data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -58,6 +59,8 @@ public class IValueArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. 
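As a hedged usage sketch of the std::vector-backed IValueArrayRef constructor added just below, together with the IValueVector helpers visible elsewhere in this patch; the no-argument IValueVector constructor is assumed from the stock JavaCPP std::vector adapter:

    import org.bytedeco.pytorch.IValue;
    import org.bytedeco.pytorch.IValueArrayRef;
    import org.bytedeco.pytorch.IValueVector;

    public class ArrayRefFromVectorSketch {
        public static void main(String[] args) {
            // resize/put are the vector helpers shown in this patch; the
            // default constructor is assumed from the usual JavaCPP adapter.
            IValueVector values = new IValueVector();
            values.resize(2);
            values.put(0, new IValue(1L));
            values.put(1, new IValue(2L));
            // Non-owning view over the vector's storage, via the
            // IValueArrayRef(IValueVector) constructor added just below.
            IValueArrayRef ref = new IValueArrayRef(values);
            if (!ref.empty()) {
                System.out.println(ref.back().isInt());  // back() shown above
            }
        }
    }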
+ public IValueArrayRef(@ByRef IValueVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef IValueVector vec); /** Construct an ArrayRef from a std::array */ @@ -70,13 +73,13 @@ public class IValueArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") IValue begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") IValue end(); + public native @Const @ByPtr IValue begin(); + public native @Const @ByPtr IValue end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") IValue cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") IValue cend(); + public native @Const @ByPtr IValue cbegin(); + public native @Const @ByPtr IValue cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); @@ -93,13 +96,13 @@ public class IValueArrayRef extends Pointer { public native @Const @ByRef IValue back(); /** equals - Check for element-wise equality. */ - public native @Cast("const bool") boolean equals(@ByVal @Cast("c10::ArrayRef*") IValueArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal IValueArrayRef RHS); /** slice(n, m) - Take M elements of the array starting at element N */ - public native @ByVal @Cast("const c10::ArrayRef*") IValueArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + public native @Const @ByVal IValueArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array. */ - public native @ByVal @Cast("const c10::ArrayRef*") IValueArrayRef slice(@Cast("size_t") long N); + public native @Const @ByVal IValueArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptional.java index 1cef759768e..634a433dc42 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class IValueOptional extends Pointer { public native @Name("operator =") @ByRef IValueOptional put(@ByRef IValueOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef IValue get(); @ValueSetter public native IValueOptional put(@ByRef IValue value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptionalVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptionalVector.java index 26844337981..45ee1064472 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptionalVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueOptionalVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import 
org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class IValueOptionalVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public IValueOptional front() { return get(0); } + public IValueOptional back() { return get(size() - 1); } @Index(function = "at") public native @ByRef IValueOptional get(@Cast("size_t") long i); public native IValueOptionalVector put(@Cast("size_t") long i, IValueOptional value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueVector.java index 148e1545563..b83152313b0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValueVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValueVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class IValueVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public IValue front() { return get(0); } + public IValue back() { return get(size() - 1); } @Index(function = "at") public native @ByRef IValue get(@Cast("size_t") long i); public native IValueVector put(@Cast("size_t") long i, IValue value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Ident.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Ident.java index 3be5f980c2d..83781f13c60 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Ident.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Ident.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Ident extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public Ident(Pointer p) { super(p); } - public Ident(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Ident(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @StdString BytePointer name(); public static native @ByVal Ident create(@Const @ByRef SourceRange range, @StdString BytePointer name); public static native @ByVal Ident create(@Const @ByRef SourceRange range, @StdString String name); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentList.java new file mode 100644 index 00000000000..babc72af3a9 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentList.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::List") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class IdentList extends TreeView { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public IdentList(Pointer p) { super(p); } + + + public IdentList(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal @Cast("torch::jit::List::iterator*") IdentListIterator begin(); + public native @ByVal @Cast("torch::jit::List::iterator*") IdentListIterator end(); + public native @Cast("bool") boolean empty(); + public native @ByVal @Name("operator []") Ident get(@Cast("size_t") long i); + + public static native @ByVal IdentList create(@Const @ByRef SourceRange range, @StdVector Ident subtrees); + public static native @ByVal IdentList unsafeCreate(@Const @ByRef SourceRange range, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector subtrees); + public native @Cast("size_t") long size(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentListIterator.java new file mode 100644 index 00000000000..14bc5f15119 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentListIterator.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::ListIterator") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class IdentListIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public IdentListIterator(Pointer p) { super(p); } + + public IdentListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it) { super((Pointer)null); allocate(it); } + private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef IdentListIterator rhs); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef IdentListIterator rhs); + public native @ByVal @Name("operator *") Ident multiply(); + public native @ByRef @Name("operator +=") IdentListIterator addPut(@Cast("std::ptrdiff_t") long n); + public native @ByRef @Name("operator ++") IdentListIterator increment(); + public native @ByRef @Name("operator --") IdentListIterator decrement(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Identity.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Identity.java deleted file mode 100644 index 572de77c43f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Identity.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code IdentityImpl}. - * See the documentation for {@code IdentityImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Identity extends IdentityImplModuleHolder { - static { Loader.load(); } - - public Identity(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Identity(@SharedPtr @Cast({"", "std::shared_ptr"}) IdentityImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) IdentityImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Identity(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java index 582c85fe000..80c8e6de2cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java index 78923f52019..d3138e250e4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class IdentityImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IdentityImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr IdentityImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(IdentityImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplModuleHolder.java deleted file mode 100644 index 9fa50c0ce3a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class IdentityImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public IdentityImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public IdentityImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public IdentityImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) IdentityImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) IdentityImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") IdentityImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") IdentityImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) IdentityImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native IdentityImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/If.java b/pytorch/src/gen/java/org/bytedeco/pytorch/If.java index c9b53bf83a8..16e5d1bd9bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/If.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/If.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,8 +25,20 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class If extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public If(Pointer p) { super(p); } - public If(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public If(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr cond(); + public native @ByVal StmtList trueBranch(); + public native @ByVal StmtList falseBranch(); + public native @ByVal If withNewBranches( + @Const @ByRef StmtList true_branch, + @Const @ByRef StmtList false_branch); + public static native @ByVal If create( + @Const @ByRef SourceRange range, + @Const @ByRef Expr cond, + @Const @ByRef StmtList true_branch, + @Const @ByRef StmtList false_branch); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IncludeDispatchKeyGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IncludeDispatchKeyGuard.java new file mode 100644 index 00000000000..8da0f75ba65 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IncludeDispatchKeyGuard.java @@ -0,0 +1,39 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// RAII API for manipulating the thread-local dispatch state. + +@Namespace("c10::impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class IncludeDispatchKeyGuard extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ + public IncludeDispatchKeyGuard(Pointer p) { super(p); } + + public IncludeDispatchKeyGuard(@ByVal DispatchKeySet arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@ByVal DispatchKeySet arg0); + public IncludeDispatchKeyGuard(DispatchKey k) { super((Pointer)null); allocate(k); } + private native void allocate(DispatchKey k); + public IncludeDispatchKeyGuard(@Cast("c10::DispatchKey") short k) { super((Pointer)null); allocate(k); } + private native void allocate(@Cast("c10::DispatchKey") short k); + + + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IndexError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IndexError.java index e3c7d98e193..0edc60aaa77 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IndexError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IndexError.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Indices.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Indices.java deleted file mode 100644 index 3b6cd4deb44..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Indices.java +++ /dev/null @@ -1,30 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -//===----------------------------------------------------------------------===// -// std::index_sequence shim for C++11 -//===----------------------------------------------------------------------===// - -// A container of type-template parameter indices. -@Namespace("torch") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Indices extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public Indices() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Indices(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java index b354ff8441d..7b01b98d7a2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InferenceMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InferredType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InferredType.java index a0b1192a1e7..a8805ca0ba6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InferredType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InferredType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStack.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStack.java index 56cb9fb1d9a..3731694b37b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStack.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStack.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -74,7 +76,7 @@ private native void allocate( public native @StdString BytePointer function_name(); // Return callstack as a vector of [Function, SourceRange] pairs. 
- public native @Cast("torch::jit::InlinedCallStackEntry*") @StdVector LongPointer vec(); + public native @Cast("torch::jit::InlinedCallStackEntry*") @StdVector LongVector vec(); public native void setCallee(@ByVal @Cast("c10::optional*") InlinedCallStackOptional arg0); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStackOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStackOptional.java index 578e6f5e616..0ae4e3e0c19 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStackOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InlinedCallStackOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class InlinedCallStackOptional extends Pointer { public native @Name("operator =") @ByRef InlinedCallStackOptional put(@ByRef InlinedCallStackOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef InlinedCallStack get(); @ValueSetter public native InlinedCallStackOptional put(@ByRef InlinedCallStack value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java index 5b354cd60d0..78fbcea53ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -126,12 +128,12 @@ public native void load_from( // Loads given the specified read and size functions. public native void load_from( - @Const @ByRef ReadFunction read_func, - @Const @ByRef SizeFunction size_func, + @Const @ByRef Reader read_func, + @Const @ByRef SizeTSupplier size_func, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); public native void load_from( - @Const @ByRef ReadFunction read_func, - @Const @ByRef SizeFunction size_func); + @Const @ByRef Reader read_func, + @Const @ByRef SizeTSupplier size_func); // Returns the vector of keys in the input archive. 
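As an editorial sketch of typical InputArchive use surrounding the load_from overloads renamed above, and not part of the patch: "model.pt" and "weight" are placeholder names, and the filename-based load_from and key-based read overloads are assumed to be mapped as in upstream torch::serialize::InputArchive:

    import org.bytedeco.pytorch.InputArchive;
    import org.bytedeco.pytorch.Tensor;

    public class InputArchiveSketch {
        public static void main(String[] args) {
            // Hypothetical usage; the filename overload of load_from is
            // assumed to exist as in upstream libtorch.
            InputArchive archive = new InputArchive();
            archive.load_from("model.pt");
            Tensor weight = new Tensor();
            archive.read("weight", weight);  // fills `weight` from the archive
            System.out.println(weight.dim());
        }
    }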
public native @ByVal StringVector keys(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InputMetadata.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InputMetadata.java deleted file mode 100644 index 28b3f5f06c5..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InputMetadata.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** - * Records TensorOptions, shape of the tensor, whether or not the Python - * dispatch key is set (tensor subclass), and, where applicable, the stream the - * corresponding operation took place on. - * - * If is_valid() is false, then the corresponding input is not used and may be - * an undefined tensor. - */ -@Namespace("torch::autograd") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InputMetadata extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public InputMetadata(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public InputMetadata(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public InputMetadata position(long position) { - return (InputMetadata)super.position(position); - } - @Override public InputMetadata getPointer(long i) { - return new InputMetadata((Pointer)this).offsetAddress(i); - } - - public InputMetadata() { super((Pointer)null); allocate(); } - private native void allocate(); - - public InputMetadata( - @Const @ByVal TensorOptions options, - @ByVal @Cast("torch::autograd::MetadataShape*") NonlinearityType input_shape, - @Cast("bool") boolean is_tensor_subclass) { super((Pointer)null); allocate(options, input_shape, is_tensor_subclass); } - private native void allocate( - @Const @ByVal TensorOptions options, - @ByVal @Cast("torch::autograd::MetadataShape*") NonlinearityType input_shape, - @Cast("bool") boolean is_tensor_subclass); - - public InputMetadata(@Const @ByRef Tensor t) { super((Pointer)null); allocate(t); } - private native void allocate(@Const @ByRef Tensor t); - - public native @Const @ByVal TensorOptions options(); - - public native @ByVal TypeMeta dtype(); - - public native @ByVal Device device(); - - public native @ByVal Layout layout(); - - public native @ByVal Stream stream(); - - public native @Cast("bool") boolean is_tensor_subclass(); - - public native @ByVal Tensor zeros_like(); - - public native @Cast("bool") boolean is_same_shape(@Const @ByRef Tensor grad); - public native @Cast("bool") boolean is_expandable_to_shape(@Const @ByRef Tensor grad); - - public native @ByVal Tensor reduce_grad(@ByRef Tensor grad); - - public native @ByVal @Cast("std::stringstream*") Pointer incompatible_shape_error_message( - @Cast("const size_t") long index, - @Const @ByRef Tensor grad); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1d.java deleted file mode 100644 index 
b804a0273b7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code InstanceNorm1dImpl}. - * See the documentation for {@code InstanceNorm1dImpl} class to learn what methods - * it provides, and examples of how to use {@code InstanceNorm1d} with - * {@code torch::nn::InstanceNorm1dOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InstanceNorm1d extends InstanceNorm1dImplModuleHolder { - static { Loader.load(); } - - public InstanceNorm1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public InstanceNorm1d(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public InstanceNorm1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java index ca1d5830053..68f4e8b9fa1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java index 5dbe36e13a4..92ca6ec7905 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java index 401af060573..555f456c535 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplBaseBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java index 0ddf5500379..ddd021dade4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class InstanceNorm1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public InstanceNorm1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(InstanceNorm1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
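// Usage sketch for the Cloneable hunk above (not part of the generated
// sources): asModule() now returns a Module obtained through
// SHARED_PTR_NAMESPACE::static_pointer_cast, so it shares ownership with the
// native object, and clone() returns the shared_ptr by value. The
// num_features constructor and the DeviceOptional(Device) convenience
// constructor are assumptions based on the C++ API and the other generated
// optionals.
import org.bytedeco.pytorch.*;
import org.bytedeco.pytorch.Module; // explicit import: avoids java.lang.Module

public class CloneableSketch {
    public static void main(String[] args) {
        InstanceNorm1dImpl norm = new InstanceNorm1dImpl(100);
        Module base = norm.asModule();   // shared-ownership upcast
        Module onCpu = norm.clone(new DeviceOptional(new Device("cpu")));
        Module copy = norm.clone();      // device defaults to c10::nullopt
    }
}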
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplModuleHolder.java deleted file mode 100644 index 8174f6c677a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InstanceNorm1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public InstanceNorm1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public InstanceNorm1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public InstanceNorm1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") InstanceNorm1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") InstanceNorm1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native InstanceNorm1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2d.java deleted file mode 100644 index 9442b69c79d..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code InstanceNorm2dImpl}. - * See the documentation for {@code InstanceNorm2dImpl} class to learn what methods - * it provides, and examples of how to use {@code InstanceNorm2d} with - * {@code torch::nn::InstanceNorm2dOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InstanceNorm2d extends InstanceNorm2dImplModuleHolder { - static { Loader.load(); } - - public InstanceNorm2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public InstanceNorm2d(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
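// Migration sketch for the deleted ModuleHolder wrappers (not part of the
// generated sources): with InstanceNorm{1,2,3}d and their *ImplModuleHolder
// bases removed, callers work with the *Impl classes directly. The
// num_features constructor and forward(Tensor) signature are assumptions
// based on the C++ torch::nn API; the input shape is hypothetical.
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class HolderMigrationSketch {
    public static void main(String[] args) {
        // Before: an InstanceNorm1d holder wrapping a shared InstanceNorm1dImpl.
        // After: construct and call the Impl directly.
        InstanceNorm1dImpl norm = new InstanceNorm1dImpl(100);
        Tensor x = randn(20, 100, 40); // (batch, features, length)
        Tensor y = norm.forward(x);
        System.out.println(y.dim());   // 3
    }
}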
*/ - public InstanceNorm2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java index 82fb91cf8e5..44131b2b8ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java index 9bed65e7d39..d753c04ee71 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java index 627f82025bf..ca1f82de43c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplBaseBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java index 2b207b0fcbd..c1788b33b40 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class InstanceNorm2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public InstanceNorm2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(InstanceNorm2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplModuleHolder.java deleted file mode 100644 index 88f2ba756f9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InstanceNorm2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public InstanceNorm2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public InstanceNorm2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public InstanceNorm2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") InstanceNorm2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") InstanceNorm2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native InstanceNorm2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3d.java deleted file mode 100644 index 7739e0cb3e3..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code InstanceNorm3dImpl}. - * See the documentation for {@code InstanceNorm3dImpl} class to learn what methods - * it provides, and examples of how to use {@code InstanceNorm3d} with - * {@code torch::nn::InstanceNorm3dOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InstanceNorm3d extends InstanceNorm3dImplModuleHolder { - static { Loader.load(); } - - public InstanceNorm3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public InstanceNorm3d(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public InstanceNorm3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java index 393a9841654..4de31c198c4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java index 759b87d7930..52ce402ef11 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java index c3bfd8956b2..ff13601e6ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplBaseBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java index c7d07160e23..0fabb125a3a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class InstanceNorm3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public InstanceNorm3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr InstanceNorm3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(InstanceNorm3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplModuleHolder.java deleted file mode 100644 index 3d89fc36d52..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InstanceNorm3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public InstanceNorm3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public InstanceNorm3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public InstanceNorm3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") InstanceNorm3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") InstanceNorm3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native InstanceNorm3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormFuncOptions.java index 1556b4c42fb..08565304a0a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormOptions.java index 7f032a93148..daacdb15775 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNormOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Instruction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Instruction.java index 9141110f05c..63aa5fdf3b2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Instruction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Instruction.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP 
version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,16 +17,10 @@ import static org.bytedeco.pytorch.global.torch.*; - -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("torch::jit") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Instruction extends Pointer { - static { Loader.load(); } + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public Instruction() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Instruction(Pointer p) { super(p); } - - public native OpCode op(); public native Instruction op(OpCode setter); - public native @Cast("uint8_t") byte unused(); public native Instruction unused(byte setter); - public native @Cast("uint16_t") short N(); public native Instruction N(short setter); - public native int X(); public native Instruction X(int setter); - // TODO: check for overflow } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java index 93c9943b739..5df2a22e996 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstructionVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,8 @@ public class InstructionVector extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public Instruction front() { return get(0); } + public Instruction back() { return get(size() - 1); } @Index(function = "at") public native @ByRef Instruction get(@Cast("size_t") long i); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java index 65b164e3f9f..9ec05261006 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,6 +22,15 @@ public class IntArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IntArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public IntArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public IntArrayRef position(long position) { + return (IntArrayRef)super.position(position); + } + @Override public IntArrayRef getPointer(long i) { + return new IntArrayRef((Pointer)this).offsetAddress(i); + } /** \name Constructors * \{ @@ -30,8 +41,7 @@ public class IntArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public IntArrayRef(int OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(int OneElt); + /** Construct an ArrayRef from a pointer and length. */ public IntArrayRef(@Const IntPointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -69,13 +79,13 @@ public class IntArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") IntPointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") IntPointer end(); + public native @Const IntPointer begin(); + public native @Const IntPointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") IntPointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") IntPointer cend(); + public native @Const IntPointer cbegin(); + public native @Const IntPointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); @@ -92,13 +102,13 @@ public class IntArrayRef extends Pointer { public native int back(); /** equals - Check for element-wise equality. */ - public native @Cast("const bool") boolean equals(@ByVal @Cast("c10::ArrayRef*") IntArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal IntArrayRef RHS); /** slice(n, m) - Take M elements of the array starting at element N */ - public native @ByVal @Cast("const c10::ArrayRef*") IntArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + public native @Const @ByVal IntArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array. 
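// Usage sketch for the IntArrayRef hunk above (not part of the generated
// sources): the implicit single-element constructor is gone and
// begin()/end() now return plain const IntPointer. An ArrayRef is a
// non-owning view, so the backing IntPointer must outlive it. The
// IntPointer(int...) constructor comes from JavaCPP itself.
import org.bytedeco.javacpp.IntPointer;
import org.bytedeco.pytorch.IntArrayRef;

public class ArrayRefSketch {
    public static void main(String[] args) {
        IntPointer data = new IntPointer(1, 2, 3, 4); // backing storage
        IntArrayRef view = new IntArrayRef(data, 4);  // non-owning view
        IntArrayRef tail = view.slice(1);             // drops the first element
        IntPointer it = tail.begin();
        for (long i = 0; i < tail.size(); i++) {
            System.out.println(it.get(i));            // 2, 3, 4
        }
    }
}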
*/ - public native @ByVal @Cast("const c10::ArrayRef*") IntArrayRef slice(@Cast("size_t") long N); + public native @Const @ByVal IntArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntOptional.java index 67e71547fce..11e27d4ddd3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class IntOptional extends Pointer { public native @Name("operator =") @ByRef IntOptional put(@ByRef IntOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") int get(); @ValueSetter public native IntOptional put(int value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntSizedSmallVectorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntSizedSmallVectorBase.java new file mode 100644 index 00000000000..7b1e99ab99a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntSizedSmallVectorBase.java @@ -0,0 +1,54 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** This is all the stuff common to all SmallVectors. + * + * The template parameter specifies the type which should be used to hold the + * Size and Capacity of the SmallVector, so it can be adjusted. + * Using 32 bit size is desirable to shrink the size of the SmallVector. + * Using 64 bit size is desirable for cases like SmallVector, where a + * 32 bit size would limit the vector to ~4GB. SmallVectors are used for + * buffering bitcode output - which can exceed 4GB. */ +@Name("c10::SmallVectorBase >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class IntSizedSmallVectorBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IntSizedSmallVectorBase(Pointer p) { super(p); } + + + public native @Cast("size_t") long size(); + public native @Cast("size_t") @Name("capacity") long _capacity(); + + + /// + /// + public native @Cast("bool") boolean empty(); + + /** Set the array size to \p N, which the current array must have enough + * capacity for. + * + * This does not construct or destroy any elements in the vector. 
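// Usage sketch for the IntOptional hunk above (not part of the generated
// sources): reset() rounds out the optional surface next to has_value(),
// get() and put(). The IntOptional(int) convenience constructor is an
// assumption, mirroring the pattern of the other generated optionals.
import org.bytedeco.pytorch.IntOptional;

public class OptionalSketch {
    public static void main(String[] args) {
        IntOptional opt = new IntOptional(42);
        if (opt.has_value()) {
            System.out.println(opt.get());   // 42
        }
        opt.reset();                         // back to c10::nullopt
        System.out.println(opt.has_value()); // false
    }
}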
+ * + * Clients can use this in conjunction with capacity() to write past the end + * of the buffer when they know that more elements are available, and only + * update the size later. This avoids the cost of value initializing elements + * which will only be overwritten. */ + public native void set_size(@Cast("size_t") long N); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntType.java index 536f052aca0..236deba923a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,5 +29,5 @@ public class IntType extends NumberType { public native @Cast("bool") boolean isSubtypeOfExt(@Const @ByRef Type rhs, @Cast("std::ostream*") Pointer why_not); @MemberGetter public static native TypeKind Kind(); // global singleton - + public static native @ByVal @Name("get") IntTypePtr getIntTypePtr(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IntTypePtr.java index 00bd7a47cbb..baf40e19a5b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IntTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InterfaceType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InterfaceType.java index 16e7fa98e4e..3fcdf9d4c72 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InterfaceType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InterfaceType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateFuncOptions.java index 53f97880048..d44f0b661d6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import 
org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,7 +46,7 @@ public class InterpolateFuncOptions extends Pointer { public native @ByRef @NoException(true) LongVectorOptional size(); public native @ByRef @NoException(true) DoubleVectorOptional scale_factor(); - public native @ByRef @NoException(true) interpolate_mode_t mode(); + public native @ByRef @NoException(true) InterpolateMode mode(); public native @ByRef @NoException(true) BoolOptional align_corners(); public native @ByRef @NoException(true) BoolOptional recompute_scale_factor(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer antialias(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/interpolate_mode_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateMode.java similarity index 50% rename from pytorch/src/gen/java/org/bytedeco/pytorch/interpolate_mode_t.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateMode.java index f4c97c7c61e..b71a5f3a46d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/interpolate_mode_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpolateMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,41 +18,41 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class interpolate_mode_t extends Pointer { +public class InterpolateMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
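// Usage sketch for the interpolate_mode_t -> InterpolateMode rename below
// (not part of the generated sources). mode().put(...) and the
// InterpolateMode(kBilinear) constructor appear in the hunks; the no-arg
// kBilinear constructor and BoolOptional.put(boolean) are assumptions based
// on the other generated enumtype and optional classes.
import org.bytedeco.pytorch.InterpolateFuncOptions;
import org.bytedeco.pytorch.InterpolateMode;
import org.bytedeco.pytorch.kBilinear;

public class InterpolateModeSketch {
    public static void main(String[] args) {
        InterpolateFuncOptions opts = new InterpolateFuncOptions();
        opts.mode().put(new kBilinear());      // was: interpolate_mode_t
        opts.align_corners().put(true);
        InterpolateMode standalone = new InterpolateMode(new kBilinear());
        System.out.println(standalone.get2()); // kBilinear is alternative #2
    }
}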
*/ - public interpolate_mode_t(Pointer p) { super(p); } - public interpolate_mode_t(kNearest value) { this(); put(value); } - public interpolate_mode_t(kLinear value) { this(); put(value); } - public interpolate_mode_t(kBilinear value) { this(); put(value); } - public interpolate_mode_t(kBicubic value) { this(); put(value); } - public interpolate_mode_t(kTrilinear value) { this(); put(value); } - public interpolate_mode_t(kArea value) { this(); put(value); } - public interpolate_mode_t(kNearestExact value) { this(); put(value); } - public interpolate_mode_t() { allocate(); } + public InterpolateMode(Pointer p) { super(p); } + public InterpolateMode(kNearest value) { this(); put(value); } + public InterpolateMode(kLinear value) { this(); put(value); } + public InterpolateMode(kBilinear value) { this(); put(value); } + public InterpolateMode(kBicubic value) { this(); put(value); } + public InterpolateMode(kTrilinear value) { this(); put(value); } + public InterpolateMode(kArea value) { this(); put(value); } + public InterpolateMode(kNearestExact value) { this(); put(value); } + public InterpolateMode() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef interpolate_mode_t put(@ByRef interpolate_mode_t x); + public native @Name("operator =") @ByRef InterpolateMode put(@ByRef InterpolateMode x); public @ByRef kNearest get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kNearest get0(@ByRef interpolate_mode_t container); - @ValueSetter public native interpolate_mode_t put(@ByRef kNearest value); + @Namespace @Name("c10::get<0>") public static native @ByRef kNearest get0(@ByRef InterpolateMode container); + @ValueSetter public native InterpolateMode put(@ByRef kNearest value); public @ByRef kLinear get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kLinear get1(@ByRef interpolate_mode_t container); - @ValueSetter public native interpolate_mode_t put(@ByRef kLinear value); + @Namespace @Name("c10::get<1>") public static native @ByRef kLinear get1(@ByRef InterpolateMode container); + @ValueSetter public native InterpolateMode put(@ByRef kLinear value); public @ByRef kBilinear get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kBilinear get2(@ByRef interpolate_mode_t container); - @ValueSetter public native interpolate_mode_t put(@ByRef kBilinear value); + @Namespace @Name("c10::get<2>") public static native @ByRef kBilinear get2(@ByRef InterpolateMode container); + @ValueSetter public native InterpolateMode put(@ByRef kBilinear value); public @ByRef kBicubic get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kBicubic get3(@ByRef interpolate_mode_t container); - @ValueSetter public native interpolate_mode_t put(@ByRef kBicubic value); + @Namespace @Name("c10::get<3>") public static native @ByRef kBicubic get3(@ByRef InterpolateMode container); + @ValueSetter public native InterpolateMode put(@ByRef kBicubic value); public @ByRef kTrilinear get4() { return get4(this); } - @Namespace @Name("c10::get<4>") public static native @ByRef kTrilinear get4(@ByRef interpolate_mode_t container); - @ValueSetter public native interpolate_mode_t put(@ByRef kTrilinear value); + @Namespace @Name("c10::get<4>") public static native @ByRef kTrilinear get4(@ByRef InterpolateMode container); + @ValueSetter public native InterpolateMode put(@ByRef kTrilinear value); public @ByRef kArea get5() { return get5(this); } - 
@Namespace @Name("c10::get<5>") public static native @ByRef kArea get5(@ByRef interpolate_mode_t container); - @ValueSetter public native interpolate_mode_t put(@ByRef kArea value); + @Namespace @Name("c10::get<5>") public static native @ByRef kArea get5(@ByRef InterpolateMode container); + @ValueSetter public native InterpolateMode put(@ByRef kArea value); public @ByRef kNearestExact get6() { return get6(this); } - @Namespace @Name("c10::get<6>") public static native @ByRef kNearestExact get6(@ByRef interpolate_mode_t container); - @ValueSetter public native interpolate_mode_t put(@ByRef kNearestExact value); + @Namespace @Name("c10::get<6>") public static native @ByRef kNearestExact get6(@ByRef InterpolateMode container); + @ValueSetter public native InterpolateMode put(@ByRef kNearestExact value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterContinuation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterContinuation.java deleted file mode 100644 index d6be33229ee..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterContinuation.java +++ /dev/null @@ -1,47 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// InterpreterContinuation propagates dist_autograd_context_id -// through (and only through) the forward pass manually, other -// thread local settings are propagated with ThreadLocalState -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InterpreterContinuation extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public InterpreterContinuation(Pointer p) { super(p); } - - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) - public InterpreterContinuation( - @Const @ByRef InterpreterState state_, - @ByVal IValueVector stack_, - @Cast("int64_t") long dist_autograd_context_id/*=0*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") ThreadLocalStateOptional tls_state) { super((Pointer)null); allocate(state_, stack_, dist_autograd_context_id, tls_state); } - private native void allocate( - @Const @ByRef InterpreterState state_, - @ByVal IValueVector stack_, - @Cast("int64_t") long dist_autograd_context_id/*=0*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") ThreadLocalStateOptional tls_state); - public InterpreterContinuation( - @Const @ByRef InterpreterState state_, - @ByVal IValueVector stack_) { super((Pointer)null); allocate(state_, stack_); } - private native void allocate( - @Const @ByRef InterpreterState state_, - @ByVal IValueVector stack_); - - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterState.java deleted file mode 100644 index b5acf692a4c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterState.java +++ /dev/null @@ -1,36 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class InterpreterState extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public InterpreterState(Pointer p) { super(p); } - - public InterpreterState( - @Const @ByRef Code code, - @ByVal(nullValue = "torch::jit::TaskLauncher(at::launch)") @Cast("torch::jit::TaskLauncher*") Pointer taskLauncher) { super((Pointer)null); allocate(code, taskLauncher); } - private native void allocate( - @Const @ByRef Code code, - @ByVal(nullValue = "torch::jit::TaskLauncher(at::launch)") @Cast("torch::jit::TaskLauncher*") Pointer taskLauncher); - public InterpreterState( - @Const @ByRef Code code) { super((Pointer)null); allocate(code); } - private native void allocate( - @Const @ByRef Code code); - public native void run(@ByRef IValueVector stack); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java index 080ac56dfc5..0307371678f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InterpreterStateImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IterableTree.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IterableTree.java deleted file mode 100644 index 8a74a1a7758..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IterableTree.java +++ /dev/null @@ -1,87 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// Specialized Tree structure to matched against for special handling -// of builtin functions iterables expressions like zip(), enumerate(), etc. -// zip and enumerate can be modeled as a tree of SimpleValue/RangeValue: -// zip(x, y) -> (x, y) with tuple assignment to each loop target -// enumerate(x) -> (range(0, math.inf, 1), x) -// So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be: -// (a, (range(0, math.inf, 1), b), range(0, 100)) -// We use those base iterables to fill in the loop information like -// max_trip_count and set the value table for loop targets -// Iterables can contain lists of SugaredValues like ModuleLists. If it -// does, then we emit it unrolled and require that all values it contains -// have a statically-determinable length. -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class IterableTree extends SugaredValue { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public IterableTree(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public IterableTree(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public IterableTree position(long position) { - return (IterableTree)super.position(position); - } - @Override public IterableTree getPointer(long i) { - return new IterableTree((Pointer)this).offsetAddress(i); - } - - public IterableTree() { super((Pointer)null); allocate(); } - private native void allocate(); - public IterableTree( - @Const @ByRef SourceRange range, - @ByRef GraphFunction m, - @ByVal SugaredValueArrayRef children) { super((Pointer)null); allocate(range, m, children); } - private native void allocate( - @Const @ByRef SourceRange range, - @ByRef GraphFunction m, - @ByVal SugaredValueArrayRef children); - public native @StdString BytePointer kind(); - - public native @SharedPtr @ByVal SugaredValue iter(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); - - public native void addChild( - @Const @ByRef SourceRange range, - @ByRef GraphFunction m, - @Const @SharedPtr @ByRef SugaredValue iter_value); - - public native @ByVal SugaredValueVector get_children(); - - // If this iterable contains a ModuleList or Tuple, then it will have a - // static length, and we will emit it as an unrolled for loop. - public native @ByVal LongOptional staticLen(); - - // given a IterableTree node, get all the base iterables/leaves under the - // IterableTree node. This enables - // us to get all the basic SugaredValues that contains valid loop information - // with len() and getitem() - public native @ByVal SugaredValueVector get_base_iterables(); - - public native Value len(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); - public native @SharedPtr @ByVal SugaredValue getitem( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - Value idx, - @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); - public native @SharedPtr @ByVal SugaredValue getitem( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - Value idx); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java index 1a4cec5b54a..5df5dbd4718 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -31,8 +33,8 @@ public class JitModule extends JitObject { public JitModule(@ByVal QualifiedName class_name) { super((Pointer)null); allocate(class_name); } private native void allocate(@ByVal QualifiedName class_name); - public JitModule(@SharedPtr CompilationUnit cu, @Const @SharedPtr @ByRef ClassType type) { super((Pointer)null); allocate(cu, type); } - private native void allocate(@SharedPtr CompilationUnit cu, @Const @SharedPtr @ByRef ClassType type); + public JitModule(@SharedPtr CompilationUnit cu, @Const @SharedPtr("c10::ClassType") @ByRef ClassType type) { super((Pointer)null); allocate(cu, type); } + private native void allocate(@SharedPtr CompilationUnit cu, @Const @SharedPtr("c10::ClassType") @ByRef ClassType type); public JitModule() { 
super((Pointer)null); allocate(); } private native void allocate(); public JitModule( @@ -99,7 +101,7 @@ public native void register_attribute( public native void register_module(@StdString BytePointer name, @Const @ByRef JitModule module); public native void register_module(@StdString String name, @Const @ByRef JitModule module); - public native void apply(@Const @ByRef ModuleFunction fn); + public native void apply(@Const @ByRef JitModuleApplyFunction fn); public native @ByVal buffer_list buffers(@Cast("bool") boolean recurse/*=true*/); public native @ByVal buffer_list buffers(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java index bb58ec20219..3a0f8d63ed4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -380,11 +382,11 @@ public class JitNode extends Pointer { public native @Cast("const torch::jit::IntAttr::ValueType") long i(@ByVal Symbol name); public native JitNode is_(@ByVal Symbol name, @ByVal @Cast("torch::jit::IntsAttr::ConstructorType*") LongVector v); public native @Cast("const torch::jit::IntsAttr::ValueType*") @ByRef LongVector is(@ByVal Symbol name); - public native JitNode g_(@ByVal Symbol name, @SharedPtr @ByVal Graph v); + public native JitNode g_(@ByVal Symbol name, @SharedPtr("torch::jit::Graph") @ByVal Graph v); public native JitNode gs_(@ByVal Symbol name, @ByVal @Cast("torch::jit::GraphsAttr::ConstructorType*") GraphVector v); public native @Cast("const torch::jit::GraphsAttr::ValueType*") @ByRef GraphVector gs(@ByVal Symbol name); - public native JitNode ty_(@ByVal Symbol name, @ByVal @Cast("torch::jit::TypeAttr::ConstructorType*") Type.TypePtr v); - public native @Cast("const torch::jit::TypeAttr::ValueType*") @ByRef Type.TypePtr ty(@ByVal Symbol name); + public native JitNode ty_(@ByVal Symbol name, @ByVal Type.TypePtr v); + public native @Const @ByRef Type.TypePtr ty(@ByVal Symbol name); public native JitNode tys_(@ByVal Symbol name, @ByVal @Cast("torch::jit::TypesAttr::ConstructorType*") TypeVector v); public native @Cast("const torch::jit::TypesAttr::ValueType*") @ByRef TypeVector tys(@ByVal Symbol name); public native JitNode ival_(@ByVal Symbol name, @ByVal @Cast("torch::jit::IValueAttr::ConstructorType*") IValue v); @@ -394,7 +396,7 @@ public class JitNode extends Pointer { // Our Graphs are not very const-correct, so we need to allow returning // non-const references too - public native @SharedPtr @ByRef Graph g(@ByVal Symbol name); + public native @SharedPtr("torch::jit::Graph") @ByRef Graph g(@ByVal Symbol name); // does not use CREATE_ACCESSOR because we need additional asserts public native JitNode t_(@ByVal Symbol name, @ByVal @Cast("torch::jit::TensorAttr::ConstructorType*") Tensor v); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeVector.java index 6e7c6401f7c..847e584ca64 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeVector.java +++ 
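A minimal usage sketch for the apply() signature change above, which now takes the dedicated JitModuleApplyFunction wrapper from org.bytedeco.pytorch.functions instead of the generic ModuleFunction. This assumes the wrapper follows the usual JavaCPP FunctionPointer callback pattern, with a call(JitModule) method to override; the class and module names here are illustrative, not part of the patch.

import org.bytedeco.pytorch.*;
import org.bytedeco.pytorch.functions.JitModuleApplyFunction;

public class JitApplySketch {
    public static void main(String[] args) {
        JitModule root = new JitModule(new QualifiedName("demo.Root"));
        root.apply(new JitModuleApplyFunction() {
            @Override public void call(JitModule m) {
                // invoked once per module in the hierarchy, root included
                System.out.println("visited a submodule");
            }
        });
    }
}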
b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class JitNodeVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public JitNode front() { return get(0); } + public JitNode back() { return get(size() - 1); } @Index(function = "at") public native @Const JitNode get(@Cast("size_t") long i); public native JitNodeVector put(@Cast("size_t") long i, JitNode value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java index e5c082536e3..9d2dbfaf87b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitNodeWrap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java index 17e206642d5..4dcc937a66b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitObject.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,22 +37,22 @@ public class JitObject extends Pointer { // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) public JitObject(@ByVal @Cast("torch::jit::ObjectPtr*") Pointer _ivalue) { super((Pointer)null); allocate(_ivalue); } private native void allocate(@ByVal @Cast("torch::jit::ObjectPtr*") Pointer _ivalue); - public JitObject(@SharedPtr CompilationUnit cu, @Const @SharedPtr @ByRef ClassType type) { super((Pointer)null); allocate(cu, type); } - private native void allocate(@SharedPtr CompilationUnit cu, @Const @SharedPtr @ByRef ClassType type); + public JitObject(@SharedPtr CompilationUnit cu, @Const @SharedPtr("c10::ClassType") @ByRef ClassType type) { super((Pointer)null); allocate(cu, type); } + private native void allocate(@SharedPtr CompilationUnit cu, @Const @SharedPtr("c10::ClassType") @ByRef ClassType type); public native @ByVal @Cast("torch::jit::ObjectPtr*") Pointer _ivalue(); - public native @SharedPtr @ByVal ClassType type(); + public native @SharedPtr("c10::ClassType") @ByVal ClassType type(); public static class Property extends Pointer 
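The front() and back() helpers added to JitNodeVector above are plain Java conveniences over get(), mirroring std::vector::front()/back(); no new native state is involved. A small sketch, where the helper name logEnds is illustrative:

import org.bytedeco.pytorch.*;

public class NodeVectorSketch {
    static void logEnds(JitNodeVector nodes) {
        if (nodes.size() > 0) {
            JitNode first = nodes.front(); // same element as nodes.get(0)
            JitNode last  = nodes.back();  // same element as nodes.get(nodes.size() - 1)
            System.out.println("first=" + first + ", last=" + last);
        }
    }
}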
{ static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Property(Pointer p) { super(p); } - public native @StdString BytePointer name(); public native Property name(BytePointer setter); - public native @ByRef Method getter_func(); public native Property getter_func(Method setter); - public native @ByRef MethodOptional setter_func(); public native Property setter_func(MethodOptional setter); + public native @StdString @NoOffset BytePointer name(); public native Property name(BytePointer setter); + public native @ByRef @NoOffset Method getter_func(); public native Property getter_func(Method setter); + public native @ByRef @NoOffset MethodOptional setter_func(); public native Property setter_func(MethodOptional setter); } public native void setattr(@StdString BytePointer name, @ByVal IValue v); @@ -102,9 +104,9 @@ public static class Property extends Pointer { * from the method */ // so that C++ users can easily add methods - public native void define(@StdString BytePointer src, @Const @SharedPtr @ByRef(nullValue = "torch::jit::ResolverPtr(nullptr)") Resolver resolver); + public native void define(@StdString BytePointer src, @Const @SharedPtr("torch::jit::Resolver") @ByRef(nullValue = "std::shared_ptr(nullptr)") Resolver resolver); public native void define(@StdString BytePointer src); - public native void define(@StdString String src, @Const @SharedPtr @ByRef(nullValue = "torch::jit::ResolverPtr(nullptr)") Resolver resolver); + public native void define(@StdString String src, @Const @SharedPtr("torch::jit::Resolver") @ByRef(nullValue = "std::shared_ptr(nullptr)") Resolver resolver); public native void define(@StdString String src); public native @Cast("size_t") long num_slots(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitString.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitString.java index 25c80a402fb..e7a11161eb2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitString.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitString.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLoss.java deleted file mode 100644 index 32c515ee761..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code KLDivLossImpl}. 
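The define() overloads above now spell the null default as std::shared_ptr(nullptr) and pin the resolver's shared_ptr type, but Java call sites are unchanged. A minimal sketch, assuming a freshly constructed JitModule can compile TorchScript source this way; the qualified name and method body are illustrative:

import org.bytedeco.pytorch.*;

public class DefineSketch {
    public static void main(String[] args) {
        JitModule m = new JitModule(new QualifiedName("demo.M"));
        m.define("def two(self) -> int:\n    return 2\n"); // resolver defaults to null
    }
}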
- * See the documentation for {@code KLDivLossImpl} class to learn what methods it - * provides, and examples of how to use {@code KLDivLoss} with - * {@code torch::nn::KLDivLossOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class KLDivLoss extends KLDivLossImplModuleHolder { - static { Loader.load(); } - - public KLDivLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public KLDivLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) KLDivLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) KLDivLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public KLDivLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java index faa45ab7bc4..e5697d664b8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,9 +48,9 @@ public class KLDivLossImpl extends KLDivLossImplCloneable { } public KLDivLossImpl(@ByVal(nullValue = "torch::nn::KLDivLossOptions{}") KLDivLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::KLDivLossOptions{}") KLDivLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::KLDivLossOptions{}") KLDivLossOptions options_); public KLDivLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java index 5048ac7a18c..5304cc14566 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class KLDivLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
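The @NoDeallocator to @SharedPtr switch on allocate() above means a KLDivLossImpl is owned by a shared_ptr from construction onward, which is what clone() and the module containers expect. A minimal construction sketch, assuming the enumtype tag classes such as kBatchMean keep their no-argument constructors:

import org.bytedeco.pytorch.*;

public class KLDivSketch {
    public static void main(String[] args) {
        KLDivLossImpl crit = new KLDivLossImpl(new KLDivLossOptions(new kBatchMean()));
        // crit.forward(input, target) would then reduce with "batchmean"
        System.out.println("constructed KLDivLoss with batchmean reduction");
    }
}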
*/ public KLDivLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr KLDivLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(KLDivLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplModuleHolder.java deleted file mode 100644 index 4e1f3ed7e46..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class KLDivLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public KLDivLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public KLDivLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
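asModule() above now routes through SHARED_PTR_NAMESPACE::static_pointer_cast and takes the receiver as @SharedPtr, so the returned Module shares ownership with the impl instead of aliasing a raw static_cast. A short sketch:

import org.bytedeco.pytorch.*;
import org.bytedeco.pytorch.Module;

public class AsModuleSketch {
    public static void main(String[] args) {
        KLDivLossImpl crit = new KLDivLossImpl();
        Module asBase = crit.asModule(); // shared_ptr-backed view of the same module
        System.out.println(asBase.isNull() ? "null" : "shared base handle");
    }
}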
*/ - /* implicit */ public KLDivLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) KLDivLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) KLDivLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") KLDivLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") KLDivLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) KLDivLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native KLDivLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossOptions.java index 9dab8191f94..5f11f9da3a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -55,6 +57,6 @@ public class KLDivLossOptions extends Pointer { public KLDivLossOptions(@ByVal kMean reduction) { super((Pointer)null); allocate(reduction); } private native void allocate(@ByVal kMean reduction); - public native @ByRef @NoException(true) kldiv_loss_reduction_t reduction(); + public native @ByRef @NoException(true) KLDivLossReduction reduction(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer log_target(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kldiv_loss_reduction_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossReduction.java similarity index 51% rename from pytorch/src/gen/java/org/bytedeco/pytorch/kldiv_loss_reduction_t.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossReduction.java index bffc3f0f28e..9718f155825 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kldiv_loss_reduction_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossReduction.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import 
org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,29 +18,29 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class kldiv_loss_reduction_t extends Pointer { +public class KLDivLossReduction extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public kldiv_loss_reduction_t(Pointer p) { super(p); } - public kldiv_loss_reduction_t(kNone value) { this(); put(value); } - public kldiv_loss_reduction_t(kBatchMean value) { this(); put(value); } - public kldiv_loss_reduction_t(kSum value) { this(); put(value); } - public kldiv_loss_reduction_t(kMean value) { this(); put(value); } - public kldiv_loss_reduction_t() { allocate(); } + public KLDivLossReduction(Pointer p) { super(p); } + public KLDivLossReduction(kNone value) { this(); put(value); } + public KLDivLossReduction(kBatchMean value) { this(); put(value); } + public KLDivLossReduction(kSum value) { this(); put(value); } + public KLDivLossReduction(kMean value) { this(); put(value); } + public KLDivLossReduction() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef kldiv_loss_reduction_t put(@ByRef kldiv_loss_reduction_t x); + public native @Name("operator =") @ByRef KLDivLossReduction put(@ByRef KLDivLossReduction x); public @ByRef kNone get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kNone get0(@ByRef kldiv_loss_reduction_t container); - @ValueSetter public native kldiv_loss_reduction_t put(@ByRef kNone value); + @Namespace @Name("c10::get<0>") public static native @ByRef kNone get0(@ByRef KLDivLossReduction container); + @ValueSetter public native KLDivLossReduction put(@ByRef kNone value); public @ByRef kBatchMean get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kBatchMean get1(@ByRef kldiv_loss_reduction_t container); - @ValueSetter public native kldiv_loss_reduction_t put(@ByRef kBatchMean value); + @Namespace @Name("c10::get<1>") public static native @ByRef kBatchMean get1(@ByRef KLDivLossReduction container); + @ValueSetter public native KLDivLossReduction put(@ByRef kBatchMean value); public @ByRef kSum get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kSum get2(@ByRef kldiv_loss_reduction_t container); - @ValueSetter public native kldiv_loss_reduction_t put(@ByRef kSum value); + @Namespace @Name("c10::get<2>") public static native @ByRef kSum get2(@ByRef KLDivLossReduction container); + @ValueSetter public native KLDivLossReduction put(@ByRef kSum value); public @ByRef kMean get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kMean get3(@ByRef kldiv_loss_reduction_t container); - @ValueSetter public native kldiv_loss_reduction_t put(@ByRef kMean value); + @Namespace @Name("c10::get<3>") public static native @ByRef kMean get3(@ByRef KLDivLossReduction container); + @ValueSetter public native KLDivLossReduction put(@ByRef kMean value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KernelFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KernelFunction.java index cde2819166c..0fe5397079e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KernelFunction.java +++ 
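The rename of kldiv_loss_reduction_t to KLDivLossReduction above keeps the variant surface intact: put(...) stores a tag and getN() reads it back, with indices 0 through 3 mapping to kNone, kBatchMean, kSum, and kMean per the accessors shown. A minimal sketch; as with c10::get on any variant, get1() fails if a different alternative is active:

import org.bytedeco.pytorch.*;

public class ReductionSketch {
    public static void main(String[] args) {
        KLDivLossOptions opts = new KLDivLossOptions(new kBatchMean());
        KLDivLossReduction r = opts.reduction(); // variant currently holding kBatchMean
        kBatchMean tag = r.get1();               // index 1 == kBatchMean
    }
}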
b/pytorch/src/gen/java/org/bytedeco/pytorch/KernelFunction.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -170,5 +172,5 @@ public class KernelFunction extends Pointer { public native @StdString BytePointer dumpState(); // For testing internal invariants only - public native @Cast("bool") boolean _equalsBoxedAndUnboxed(@Const @ByRef KernelFunction arg0); + } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1Loss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1Loss.java deleted file mode 100644 index 565fbc556e7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1Loss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code L1LossImpl}. - * See the documentation for {@code L1LossImpl} class to learn what methods it - * provides, and examples of how to use {@code L1Loss} with - * {@code torch::nn::L1LossOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class L1Loss extends L1LossImplModuleHolder { - static { Loader.load(); } - - public L1Loss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public L1Loss(@SharedPtr @Cast({"", "std::shared_ptr"}) L1LossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) L1LossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public L1Loss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java index b5336b1876b..aa9134366b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,9 +48,9 @@ public class L1LossImpl extends L1LossImplCloneable { } public L1LossImpl(@ByVal(nullValue = "torch::nn::L1LossOptions{}") L1LossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::L1LossOptions{}") L1LossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::L1LossOptions{}") L1LossOptions options_); public L1LossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java index 2e1e45ed81b..63f3520178d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class L1LossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public L1LossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr L1LossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(L1LossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplModuleHolder.java deleted file mode 100644 index 17d48097c50..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class L1LossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public L1LossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public L1LossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public L1LossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) L1LossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) L1LossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") L1LossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") L1LossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) L1LossImpl ptr(); - - /** Returns a pointer to the underlying module. 
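clone() above now returns the shared_ptr by value (@SharedPtr("torch::nn::Module") @ByVal) rather than as a borrowed reference. A sketch of both overloads, assuming the generated DeviceOptional adapter offers the usual value-taking constructor found on the other *Optional helpers in these presets:

import org.bytedeco.pytorch.*;
import org.bytedeco.pytorch.Module;

public class CloneSketch {
    public static void main(String[] args) {
        L1LossImpl loss = new L1LossImpl();
        Module deepCopy = loss.clone(); // parameters and buffers are duplicated, not shared
        Module onCpu = loss.clone(new DeviceOptional(new Device("cpu")));
    }
}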
*/ - public native L1LossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossOptions.java index 78fa5354460..d3a25b502f9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,5 +48,5 @@ public class L1LossOptions extends Pointer { private native void allocate(@ByVal kMean reduction); public L1LossOptions(@ByVal kSum reduction) { super((Pointer)null); allocate(reduction); } private native void allocate(@ByVal kSum reduction); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java index 22af676e6da..3f0fdef0a44 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGS.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,10 +34,10 @@ public LBFGS( @ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); } private native void allocate( @ByVal OptimizerParamGroupVector param_groups); - public LBFGS(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::LBFGSOptions{}") LBFGSOptions defaults) { super((Pointer)null); allocate(params, defaults); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::LBFGSOptions{}") LBFGSOptions defaults); - public LBFGS(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); + public LBFGS(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::LBFGSOptions{}") LBFGSOptions defaults) { super((Pointer)null); allocate(params, defaults); } + private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::LBFGSOptions{}") 
LBFGSOptions defaults); + public LBFGS(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } + private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); public native @ByVal Tensor step(@ByVal LossClosure closure); public native void save(@ByRef OutputArchive archive); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java index c9f34be7e7f..f58c5054ede 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,7 +37,10 @@ public class LBFGSOptions extends OptimizerCloneableLBFGSOptions { public native @ByRef @NoException(true) StringOptional line_search_fn(); - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals( + @Const @ByRef LBFGSOptions lhs, + @Const @ByRef LBFGSOptions rhs); + public boolean equals(LBFGSOptions rhs) { return equals(this, rhs); } public native double get_lr(); public native void set_lr(double lr); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java index 2dbb0b65b68..be1a7e7ee3c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -47,5 +49,8 @@ public class LBFGSParamState extends OptimizerCloneableLBFGSParamState { public native @ByRef @NoException(true) TensorVectorOptional al(); - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals( + @Const @ByRef LBFGSParamState lhs, + @Const @ByRef LBFGSParamState rhs); + public boolean equals(LBFGSParamState rhs) { return equals(this, rhs); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1d.java deleted file mode 100644 index 40cf399a1b6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static 
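The operator== bridges added to LBFGSOptions (and LBFGSParamState) above surface in Java as equals(...) overloads that compare the full native option state. A minimal sketch, assuming LBFGSOptions keeps a no-argument constructor for its defaulted C++ parameters:

import org.bytedeco.pytorch.*;

public class LbfgsEqualsSketch {
    public static void main(String[] args) {
        LBFGSOptions a = new LBFGSOptions();
        LBFGSOptions b = new LBFGSOptions();
        a.set_lr(1e-2);
        b.set_lr(1e-2);
        System.out.println(a.equals(b)); // forwards to torch::optim::operator==
    }
}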
org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code LPPool1dImpl}. - * See the documentation for {@code LPPool1dImpl} class to learn what methods it - * provides, and examples of how to use {@code LPPool1d} with - * {@code torch::nn::LPPool1dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LPPool1d extends LPPool1dImplModuleHolder { - static { Loader.load(); } - - public LPPool1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public LPPool1d(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LPPool1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java index dacd9b92c87..d331471f8c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class LPPool1dImpl extends LPPool1dImplBase { public LPPool1dImpl(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(norm_type, kernel_size); } - @NoDeallocator private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); + private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public LPPool1dImpl(@Const @ByRef LPPool1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef LPPool1dOptions options_); + private native void allocate(@Const @ByRef LPPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
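With the LPPool1d holder deleted above, LPPool1dImpl becomes the single entry point, and its allocate() methods lose @NoDeallocator in the same hunk. A construction sketch, assuming the ExpandingArray<1> kernel size can be supplied as a one-element LongPointer as the @Cast annotation suggests:

import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.*;

public class LpPoolSketch {
    public static void main(String[] args) {
        LongPointer kernel = new LongPointer(new long[]{3}); // one element for ExpandingArray<1>
        LPPool1dImpl pool = new LPPool1dImpl(2.0, kernel);   // L2 norm, window of 3
    }
}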
*/ public LPPool1dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java index 8c7b6c5a1b2..542f496814f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,9 +28,9 @@ public class LPPool1dImplBase extends LPPool1dImplCloneable { public LPPool1dImplBase(Pointer p) { super(p); } public LPPool1dImplBase(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(norm_type, kernel_size); } - @NoDeallocator private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); + private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public LPPool1dImplBase(@Const @ByRef LPPool1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef LPPool1dOptions options_); + private native void allocate(@Const @ByRef LPPool1dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java index 4f37c584e7d..8ef91cb134e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class LPPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LPPool1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(LPPool1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplModuleHolder.java deleted file mode 100644 index b7dd9455768..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LPPool1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LPPool1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public LPPool1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public LPPool1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") LPPool1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") LPPool1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) LPPool1dImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native LPPool1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dOptions.java index df24a1e0fc5..6122b1371e8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2d.java deleted file mode 100644 index 05ab061cc60..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code LPPool2dImpl}. - * See the documentation for {@code LPPool2dImpl} class to learn what methods it - * provides, and examples of how to use {@code LPPool2d} with - * {@code torch::nn::LPPool2dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LPPool2d extends LPPool2dImplModuleHolder { - static { Loader.load(); } - - public LPPool2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public LPPool2d(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public LPPool2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java index df1eb51b35b..d0e9d6dd967 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class LPPool2dImpl extends LPPool2dImplBase { public LPPool2dImpl(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(norm_type, kernel_size); } - @NoDeallocator private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public LPPool2dImpl(@Const @ByRef LPPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef LPPool2dOptions options_); + private native void allocate(@Const @ByRef LPPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool2dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java index 5055d3a18dc..24d6d3509ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class LPPool2dImplBase extends LPPool2dImplCloneable { public LPPool2dImplBase(Pointer p) { super(p); } public LPPool2dImplBase(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(norm_type, kernel_size); } - @NoDeallocator private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + private native void allocate(double norm_type, @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public LPPool2dImplBase(@Const @ByRef LPPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef LPPool2dOptions options_); + private native void allocate(@Const @ByRef LPPool2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java index dbd55748ab7..0fea8e6edb1 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class LPPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LPPool2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LPPool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(LPPool2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplModuleHolder.java deleted file mode 100644 index 0abfb2d546a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LPPool2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LPPool2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. 
Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public LPPool2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public LPPool2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LPPool2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") LPPool2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") LPPool2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) LPPool2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native LPPool2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dOptions.java index 9acbf34d973..8ec162d780b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dOptions.java index b6de05f791b..926df8f3d83 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LRScheduler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LRScheduler.java index 4f3e1eba0b3..efd7ed8185c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LRScheduler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LRScheduler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTM.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTM.java deleted file mode 100644 index a471568532f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTM.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code LSTMImpl}. - * See the documentation for {@code LSTMImpl} class to learn what methods it - * provides, and examples of how to use {@code LSTM} with {@code torch::nn::LSTMOptions}. 
- * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LSTM extends LSTMImplModuleHolder { - static { Loader.load(); } - - public LSTM(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public LSTM(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LSTM(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCell.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCell.java deleted file mode 100644 index e12cefbc800..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCell.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code LSTMCellImpl}. - * See the documentation for {@code LSTMCellImpl} class to learn what methods it - * provides, and examples of how to use {@code LSTMCell} with - * {@code torch::nn::LSTMCellOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LSTMCell extends LSTMCellImplModuleHolder { - static { Loader.load(); } - - public LSTMCell(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public LSTMCell(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMCellImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMCellImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public LSTMCell(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java index 87843b424ee..049ca25b277 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,14 +39,14 @@ public class LSTMCellImpl extends LSTMCellImplBase { public LSTMCellImpl(Pointer p) { super(p); } public LSTMCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @NoDeallocator private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public LSTMCellImpl(@Const @ByRef LSTMCellOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef LSTMCellOptions options_); + @SharedPtr private native void allocate(@Const @ByRef LSTMCellOptions options_); - public native @ByVal TensorTensorTuple forward( + public native @ByVal T_TensorTensor_T forward( @Const @ByRef Tensor input, - @ByVal(nullValue = "torch::optional >{}") TensorTensorOptional hx_opt); - public native @ByVal TensorTensorTuple forward( + @ByVal(nullValue = "torch::optional >{}") T_TensorTensor_TOptional hx_opt); + public native @ByVal T_TensorTensor_T forward( @Const @ByRef Tensor input); public native @ByRef LSTMCellOptions options(); public native LSTMCellImpl options(LSTMCellOptions setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java index 7ebb42d17dd..70f15bcdd02 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,7 +24,7 @@ public class LSTMCellImplBase extends LSTMCellImplCloneable { public LSTMCellImplBase(Pointer p) { super(p); } public LSTMCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef RNNCellOptionsBase options_); + private native void allocate(@Const @ByRef RNNCellOptionsBase options_); /** Initializes the parameters of the RNNCell module. 
*/ public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java index 4435bb07f92..a39748328cb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class LSTMCellImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMCellImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LSTMCellImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(LSTMCellImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplModuleHolder.java deleted file mode 100644 index c0389752e62..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LSTMCellImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LSTMCellImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. 
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public LSTMCellImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public LSTMCellImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMCellImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMCellImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") LSTMCellImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") LSTMCellImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) LSTMCellImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native LSTMCellImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellOptions.java index d1cfe34af35..4a1f5b70890 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java index 6ba18583046..5416c888b4f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,19 +39,19 @@ public class LSTMImpl extends LSTMImplBase { public LSTMImpl(Pointer p) { super(p); } public LSTMImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @NoDeallocator private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public LSTMImpl(@Const @ByRef LSTMOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef LSTMOptions options_); + @SharedPtr private native void allocate(@Const @ByRef LSTMOptions options_); - public native @ByVal TensorTensorTensorTupleTuple forward( + public native @ByVal T_TensorT_TensorTensor_T_T forward( @Const @ByRef Tensor input, - @ByVal(nullValue = "torch::optional >{}") TensorTensorOptional hx_opt); - public native @ByVal TensorTensorTensorTupleTuple forward( + @ByVal(nullValue = "torch::optional >{}") T_TensorTensor_TOptional hx_opt); + public native @ByVal T_TensorT_TensorTensor_T_T forward( @Const @ByRef Tensor input); - public native @ByVal PackedSequenceTensorTensorTupleTuple forward_with_packed_input( + public native @ByVal T_PackedSequenceT_TensorTensor_T_T forward_with_packed_input( @Const @ByRef PackedSequence packed_input, - @ByVal(nullValue = "torch::optional >{}") TensorTensorOptional hx_opt); - public native @ByVal PackedSequenceTensorTensorTupleTuple forward_with_packed_input( + @ByVal(nullValue = "torch::optional >{}") T_TensorTensor_TOptional hx_opt); + public native @ByVal T_PackedSequenceT_TensorTensor_T_T forward_with_packed_input( @Const @ByRef PackedSequence packed_input); public native @ByRef LSTMOptions options(); public native LSTMImpl options(LSTMOptions setter); diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java index 00ca38061e9..7a47b1b97f5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,7 +24,7 @@ public class LSTMImplBase extends LSTMImplCloneable { public LSTMImplBase(Pointer p) { super(p); } public LSTMImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef RNNOptionsBase options_); + private native void allocate(@Const @ByRef RNNOptionsBase options_); /** Initializes the parameters of the RNN module. */ public native void reset(); @@ -53,7 +55,7 @@ public class LSTMImplBase extends LSTMImplCloneable { * called once upon construction, inside {@code reset()}. */ public native void flatten_parameters(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector all_weights(); + public native @Cast({"", "std::vector"}) @StdMove TensorVector all_weights(); /** The RNN's options. */ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java index 7f03cd51e5f..851d770ae07 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class LSTMImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LSTMImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LSTMImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(LSTMImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplModuleHolder.java deleted file mode 100644 index c2104f80368..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LSTMImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LSTMImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public LSTMImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public LSTMImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LSTMImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") LSTMImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") LSTMImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) LSTMImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native LSTMImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMOptions.java index 8126e6e2d9e..dc58ae0d78f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNorm.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNorm.java deleted file mode 100644 index fa3603de1fe..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNorm.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code LayerNormImpl}. - * See the documentation for {@code LayerNormImpl} class to learn what methods it - * provides, and examples of how to use {@code LayerNorm} with - * {@code torch::nn::LayerNormOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LayerNorm extends LayerNormImplModuleHolder { - static { Loader.load(); } - - public LayerNorm(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public LayerNorm(@SharedPtr @Cast({"", "std::shared_ptr"}) LayerNormImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LayerNormImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public LayerNorm(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormFuncOptions.java index a64c38accd3..8e28bf4f4f0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java index ff38b5812ee..cc9f77c603b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -38,9 +40,9 @@ public class LayerNormImpl extends LayerNormImplCloneable { public LayerNormImpl(Pointer p) { super(p); } public LayerNormImpl(@ByVal @Cast("std::vector*") LongVector normalized_shape) { super((Pointer)null); allocate(normalized_shape); } - @NoDeallocator private native void allocate(@ByVal @Cast("std::vector*") LongVector normalized_shape); + @SharedPtr private native void allocate(@ByVal @Cast("std::vector*") LongVector normalized_shape); public LayerNormImpl(@ByVal LayerNormOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal LayerNormOptions options_); + @SharedPtr private native void allocate(@ByVal LayerNormOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java index b77242d22f2..9b156c3722c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class LayerNormImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public LayerNormImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LayerNormImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(LayerNormImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplModuleHolder.java deleted file mode 100644 index c95ce31b98a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LayerNormImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LayerNormImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public LayerNormImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public LayerNormImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) LayerNormImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LayerNormImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") LayerNormImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") LayerNormImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) LayerNormImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native LayerNormImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormOptions.java index f0f4d993d47..ca043083d23 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutEnumerationType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutEnumerationType.java index 222d1085b66..af1cae0b075 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutEnumerationType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutEnumerationType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutOptional.java index 594d3b4f9b0..a3c6e21903d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS 
FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class LayoutOptional extends Pointer { public native @Name("operator =") @ByRef LayoutOptional put(@ByRef LayoutOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Layout get(); @ValueSetter public native LayoutOptional put(@ByRef Layout value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutType.java index 850c2e884e8..86f4528a16b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutTypePtr.java index 02727d52bdf..feac55766b2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayoutTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLU.java deleted file mode 100644 index 3a2916892de..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLU.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code LeakyReLUImpl}. - * See the documentation for {@code LeakyReLUImpl} class to learn what methods it - * provides, and examples of how to use {@code LeakyReLU} with - * {@code torch::nn::LeakyReLUOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LeakyReLU extends LeakyReLUImplModuleHolder { - static { Loader.load(); } - - public LeakyReLU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public LeakyReLU(@SharedPtr @Cast({"", "std::shared_ptr"}) LeakyReLUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LeakyReLUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LeakyReLU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java index 9cf72cb1105..1422bc42d11 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class LeakyReLUImpl extends LeakyReLUImplCloneable { } public LeakyReLUImpl(@Const @ByRef(nullValue = "torch::nn::LeakyReLUOptions{}") LeakyReLUOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::LeakyReLUOptions{}") LeakyReLUOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::LeakyReLUOptions{}") LeakyReLUOptions options_); public LeakyReLUImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java index 554cb93ac37..c91e1140a44 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class LeakyReLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public LeakyReLUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LeakyReLUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(LeakyReLUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplModuleHolder.java deleted file mode 100644 index 290712a0460..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LeakyReLUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LeakyReLUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public LeakyReLUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public LeakyReLUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) LeakyReLUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LeakyReLUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") LeakyReLUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") LeakyReLUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) LeakyReLUImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native LeakyReLUImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUOptions.java index c30b5338c01..00690eb0386 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LegacyTensorConstructor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LegacyTensorConstructor.java index 16c13b4ecb7..6124c4666b6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LegacyTensorConstructor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LegacyTensorConstructor.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java index 12a075043c5..ff7b37f261f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Lexer.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package 
org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Library.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Library.java new file mode 100644 index 00000000000..bd4d657f373 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Library.java @@ -0,0 +1,242 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + // namespace detail + +/** This object provides the API for defining operators and providing + * implementations at dispatch keys. Typically, a torch::Library + * is not allocated directly; instead it is created by the + * TORCH_LIBRARY() or TORCH_LIBRARY_IMPL() macros. + * + * Most methods on torch::Library return a reference to itself, + * supporting method chaining. + * + *
+ * <pre>{@code
+ *  // Examples:
+ *
+ *  TORCH_LIBRARY(torchvision, m) {
+ *     // m is a torch::Library
+ *     m.def("roi_align", ...);
+ *     ...
+ *  }
+ *
+ *  TORCH_LIBRARY_IMPL(aten, XLA, m) {
+ *     // m is a torch::Library
+ *     m.impl("add", ...);
+ *     ...
+ *  }
+ *  }</pre>
+ * */ +@Namespace("torch") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class Library extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Library(Pointer p) { super(p); } + + /** \private + * + * Which type of macro produced this Library */ + public enum Kind { + DEF(0), // from TORCH_LIBRARY (no qualifier) + IMPL(1), + FRAGMENT(2); + + public final int value; + private Kind(int v) { this.value = v; } + private Kind(Kind e) { this.value = e.value; } + public Kind intern() { for (Kind e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } + } + + /** \private + * + * Use TORCH_LIBRARY() or TORCH_LIBRARY_IMPL() instead of using these + * constructors directly */ + public Library( + Kind kind, + @StdString BytePointer ns, + @ByVal DispatchKeyOptional k, + @Cast("const char*") BytePointer file, + @Cast("uint32_t") int line) { super((Pointer)null); allocate(kind, ns, k, file, line); } + private native void allocate( + Kind kind, + @StdString BytePointer ns, + @ByVal DispatchKeyOptional k, + @Cast("const char*") BytePointer file, + @Cast("uint32_t") int line); + public Library( + @Cast("torch::Library::Kind") int kind, + @StdString String ns, + @ByVal DispatchKeyOptional k, + String file, + @Cast("uint32_t") int line) { super((Pointer)null); allocate(kind, ns, k, file, line); } + private native void allocate( + @Cast("torch::Library::Kind") int kind, + @StdString String ns, + @ByVal DispatchKeyOptional k, + String file, + @Cast("uint32_t") int line); + + + + public Library(@ByRef(true) Library arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@ByRef(true) Library arg0); + + /// + /// + public native @ByRef @Name("operator =") Library put(@ByRef(true) Library arg0); + + // Some notes about the API design here. We had the following constraints: + // + // - We need to support multiple "types" of arguments for schema and + // functions (e.g., unnamed lambda types, regular functions, const char*, + // fully instantiated schemas) + // - We don't want to write exponentially many overloads + // - We don't want to rely on implicit conversion to a common type, + // because the C++ compiler will only be willing to do a single + // implicit conversion (reducing the set of valid types which you + // can invoke with); also error messages are worse when an implicit + // conversion is not selected (as the compiler will not explain + // why it didn't select an implicit conversion; this is different + // from overloads where it will explain each candidate overload and + // why it didn't apply) + // + // To solve all of these constraints at the same time, we use a trick taken + // from the pybind11 library: template over the argument in the user visible + // API, and inside of the templated function explicitly call an overloaded + // function to resolve the argument to a real type. You get the good error + // messages from overloads, but at the same time you only need to write the + // overload for any given argument type once. + + /** Declare an operator with a schema, but don't provide any implementations + * for it. You're expected to then provide implementations using the + * impl() method. All template arguments are inferred. + * + * @param raw_schema The schema of the operator to be defined. 
+   * Typically, this is a {@code const char*} string literal, but any type
+   * accepted by torch::schema() is accepted here.
+   *
+   * <pre>{@code
+   *  // Example:
+   *  TORCH_LIBRARY(myops, m) {
+   *    m.def("add(Tensor self, Tensor other) -> Tensor");
+   *  }
+   *  }</pre>
*/ + /** Define an operator for a schema and then register an implementation for + * it. This is typically what you would use if you aren't planning + * on making use of the dispatcher to structure your operator + * implementation. It's roughly equivalent to calling def() and + * then impl(), but if you omit the schema of the operator, we will + * infer it from the type of your C++ function. All template + * arguments are inferred. + * + * @param raw_name_or_schema The schema of the operator to be + * defined, or just the name of the operator if the schema is to be + * inferred from {@code raw_f}. Typically a {@code const char*} literal. + * @param raw_f The C++ function that implements this operator. + * Any valid constructor of torch::CppFunction is accepted here; + * typically you provide a function pointer or lambda. + * + *
+   * <pre>{@code
+   *  // Example:
+   *  TORCH_LIBRARY(myops, m) {
+   *    m.def("add", add_fn);
+   *  }
+   *  }</pre>
*/ + + /** Register an implementation for an operator. You may register multiple + * implementations for a single operator at different dispatch keys + * (see torch::dispatch()). Implementations must have a corresponding + * declaration (from def()), otherwise they are invalid. If you plan + * to register multiple implementations, DO NOT provide a function + * implementation when you def() the operator. + * + * @param name The name of the operator to implement. Do NOT provide + * schema here. + * @param raw_f The C++ function that implements this operator. Any + * valid constructor of torch::CppFunction is accepted here; + * typically you provide a function pointer or lambda. + * + *
+   * <pre>{@code
+   *  // Example:
+   *  TORCH_LIBRARY_IMPL(myops, CUDA, m) {
+   *    m.impl("add", add_cuda);
+   *  }
+   *  }</pre>
*/ + +// #if defined C10_MOBILE + // Note: This overload is needed only for C10_MOBILE, since the automatically + // defined copy constructor for the CppFunction doesn't have the additional + // NoInferSchemaTag argument. We define the overload for the impl() function + // to accept a CppFunction&& argument. The already constructed CppFunction + // object may or may not have the inferred schema, but it doesn't matter + // for our purposes since if it already has the inferred schema, then we + // might as well just pass it through directly. + // +// #endif + + // Helper for getting an OperatorName for a const char*. You probably + // don't need this. + + /// + public native @ByVal OperatorName _resolve(@Cast("const char*") BytePointer name); + public native @ByVal OperatorName _resolve(String name); + + /** \private + * + * Convenience overload for directly specifying the dispatch key when + * impl(). You probably don't need this; instead, prefer specifying + * the dispatch key for the entire block in TORCH_LIBRARY_IMPL() */ + + // These overloads cover cases when a SelectiveStr (see Note [Selective + // build]) has been disabled at compile time. In that case, don't generate + // any code referencing the passed in functions at all. + public native @ByRef Library def(@ByVal DisabledStr arg0); + public native @ByRef Library def(@ByVal EnabledStr raw_schema); + + /** Register a fallback implementation for all operators which will be used + * if there is not a specific implementation for an operator available. + * There MUST be a DispatchKey associated with a fallback; e.g., + * only call this from TORCH_LIBRARY_IMPL() with namespace {@code _}. + * + * @param raw_f The function that implements the fallback. Unboxed + * functions typically do not work as fallback functions, as + * fallback functions must work for every operator (even though + * they have varying type signatures). Typical arguments are + * CppFunction::makeFallthrough() or + * CppFunction::makeFromBoxedFunction() + * + *
+   * <pre>{@code
+   *  // Example:
+   *
+   *  TORCH_LIBRARY_IMPL(_, AutogradXLA, m) {
+   *    // If there is not a kernel explicitly registered
+   *    // for AutogradXLA, fallthrough to the next
+   *    // available kernel
+   *    m.fallback(torch::CppFunction::makeFallthrough());
+   *  }
+   *
+   *  // See aten/src/ATen/core/dispatch/backend_fallback_test.cpp
+   *  // for a full example of boxed fallback
+   *  }</pre>
*/ + + // These overloads enable the use of selective build on classes registered + // within a library. The API is the same as before with 1 minor change. + // Instead of m.class_("foo") you instead do + // m.class_(TORCH_SELECTIVE_CLASS("foo")) +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinAlgError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinAlgError.java index 1774c293713..09582fdf42f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinAlgError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinAlgError.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Linear.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Linear.java deleted file mode 100644 index 8be673dcac7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Linear.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code LinearImpl}. - * See the documentation for {@code LinearImpl} class to learn what methods it - * provides, and examples of how to use {@code Linear} with - * {@code torch::nn::LinearOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Linear extends LinearImplModuleHolder { - static { Loader.load(); } - - public Linear(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Linear(@SharedPtr @Cast({"", "std::shared_ptr"}) LinearImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LinearImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
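For readers of the new Library.java above: actual operator registration still happens on the C++ side through the TORCH_LIBRARY()/TORCH_LIBRARY_IMPL() macros shown in its Javadoc; the Java class mainly exposes the resulting object. A minimal sketch of what is directly usable from Java under that assumption (output text illustrative):

    import org.bytedeco.pytorch.Library;

    public class LibraryKindDemo {
        public static void main(String[] args) {
            // Kind records which macro produced a given torch::Library:
            // DEF for TORCH_LIBRARY, IMPL for TORCH_LIBRARY_IMPL,
            // FRAGMENT for TORCH_LIBRARY_FRAGMENT.
            Library.Kind kind = Library.Kind.IMPL;
            System.out.println(kind + " = " + kind.value);  // prints "IMPL = 1"
        }
    }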
*/ - public Linear(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java index bf7215c63fc..92e565af3bd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class LinearImpl extends LinearImplCloneable { public LinearImpl(Pointer p) { super(p); } public LinearImpl(@Cast("int64_t") long in_features, @Cast("int64_t") long out_features) { super((Pointer)null); allocate(in_features, out_features); } - @NoDeallocator private native void allocate(@Cast("int64_t") long in_features, @Cast("int64_t") long out_features); + @SharedPtr private native void allocate(@Cast("int64_t") long in_features, @Cast("int64_t") long out_features); public LinearImpl(@Const @ByRef LinearOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef LinearOptions options_); + @SharedPtr private native void allocate(@Const @ByRef LinearOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java index c3432b618d6..7715fdf3bb7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class LinearImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LinearImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LinearImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(LinearImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
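The LinearImpl hunk above swaps @NoDeallocator for @SharedPtr on the allocate() methods, so a module constructed from Java is owned through a std::shared_ptr on the native side, which is what the new by-value, @SharedPtr("torch::nn::Module") clone() below hands back. A minimal usage sketch, assuming the usual forward(Tensor) binding on LinearImpl and the varargs long... overload of randn (both standard in these presets):

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class LinearDemo {
        public static void main(String[] args) {
            LinearImpl linear = new LinearImpl(4, 2);  // in_features = 4, out_features = 2
            Tensor x = randn(3, 4);                    // batch of 3 inputs
            Tensor y = linear.forward(x);              // shape [3, 2]
            Module copy = linear.clone();              // deep copy: fresh parameters and buffers
            System.out.println(y.size(0) + " x " + y.size(1));  // 3 x 2
        }
    }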
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplModuleHolder.java deleted file mode 100644 index c78d8cabdb9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LinearImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LinearImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public LinearImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public LinearImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) LinearImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LinearImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") LinearImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") LinearImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) LinearImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native LinearImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearOptions.java index 00401d0c9a4..1a07090f5e0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListComp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListComp.java index 5f4b2172954..0d45447f998 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListComp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ListComp.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,9 +22,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ListComp extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public ListComp(Pointer p) { super(p); } - public ListComp(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public ListComp(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr elt(); public native @ByVal Expr target(); public native @ByVal Expr iter(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListElementConstReferenceTraits.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListElementConstReferenceTraits.java deleted file mode 100644 index daec183d8b8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListElementConstReferenceTraits.java +++ /dev/null @@ -1,38 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// There is no to() overload for c10::optional. -@Name("c10::impl::ListElementConstReferenceTraits >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ListElementConstReferenceTraits extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public ListElementConstReferenceTraits() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ListElementConstReferenceTraits(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ListElementConstReferenceTraits(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public ListElementConstReferenceTraits position(long position) { - return (ListElementConstReferenceTraits)super.position(position); - } - @Override public ListElementConstReferenceTraits getPointer(long i) { - return new ListElementConstReferenceTraits((Pointer)this).offsetAddress(i); - } - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListImpl.java deleted file mode 100644 index f07b7ce7c25..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListImpl.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("c10::detail") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ListImpl extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ListImpl(Pointer p) { super(p); } - - - public ListImpl(@ByVal @Cast("c10::detail::ListImpl::list_type*") IValueVector list_, @ByVal Type.TypePtr elementType_) { super((Pointer)null); allocate(list_, elementType_); } - private native void allocate(@ByVal @Cast("c10::detail::ListImpl::list_type*") IValueVector list_, @ByVal Type.TypePtr elementType_); - - public native @ByRef @Cast("c10::detail::ListImpl::list_type*") IValueVector list(); public native ListImpl list(IValueVector setter); - - public native @ByRef Type.TypePtr elementType(); public native ListImpl elementType(Type.TypePtr setter); - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListLiteral.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListLiteral.java index 1889e1994e4..09c74a9951e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListLiteral.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ListLiteral.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,7 +21,13 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ListLiteral extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ListLiteral(Pointer p) { super(p); } - public ListLiteral(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public ListLiteral(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal ExprList inputs(); + public static native @ByVal ListLiteral create( + @Const @ByRef SourceRange range, + @Const @ByRef ExprList inputs); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListSingleElementType.java index 938406e0223..be5a0f82837 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListSingleElementType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ListSingleElementType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java index 63a13a1abbc..222672f536b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ListType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import 
org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalDispatchKeySet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalDispatchKeySet.java new file mode 100644 index 00000000000..60f21c5a51c --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalDispatchKeySet.java @@ -0,0 +1,31 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("c10::impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LocalDispatchKeySet extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public LocalDispatchKeySet(Pointer p) { super(p); } + + /* implicit */ public LocalDispatchKeySet(@ByVal PODLocalDispatchKeySet x) { super((Pointer)null); allocate(x); } +private native void allocate(@ByVal PODLocalDispatchKeySet x); + public native @ByRef DispatchKeySet included_(); public native LocalDispatchKeySet included_(DispatchKeySet setter); + public native @ByRef DispatchKeySet excluded_(); public native LocalDispatchKeySet excluded_(DispatchKeySet setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNorm.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNorm.java deleted file mode 100644 index f411131afe8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNorm.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code LocalResponseNormImpl}. - * See the documentation for {@code LocalResponseNormImpl} class to learn what - * methods it provides, and examples of how to use {@code LocalResponseNorm} with - * {@code torch::nn::LocalResponseNormOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. 
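The deleted LocalResponseNorm holder above is representative of every ModuleHolder removal in this patch: instead of wrapping an Impl in a holder, Java code now constructs and calls the Impl class directly, which works because its allocators become @SharedPtr in the next hunk. A minimal sketch (tensor shape illustrative):

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class HolderRemovalDemo {
        public static void main(String[] args) {
            // Previously: new LocalResponseNorm(new LocalResponseNormImpl(2))
            LocalResponseNormImpl norm = new LocalResponseNormImpl(2);  // size = 2, per the ctor below
            Tensor x = randn(1, 4, 8, 8);
            Tensor y = norm.forward(x);  // forward(Tensor) is declared in the LocalResponseNormImpl hunk below
            System.out.println(y.size(1));  // channel count unchanged: 4
        }
    }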
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LocalResponseNorm extends LocalResponseNormImplModuleHolder { - static { Loader.load(); } - - public LocalResponseNorm(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public LocalResponseNorm(@SharedPtr @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LocalResponseNorm(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java index e7e5f01565b..403863f6de2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -40,9 +42,9 @@ public class LocalResponseNormImpl extends LocalResponseNormImplCloneable { public LocalResponseNormImpl(Pointer p) { super(p); } public LocalResponseNormImpl(@Cast("int64_t") long size) { super((Pointer)null); allocate(size); } - @NoDeallocator private native void allocate(@Cast("int64_t") long size); + @SharedPtr private native void allocate(@Cast("int64_t") long size); public LocalResponseNormImpl(@Const @ByRef LocalResponseNormOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef LocalResponseNormOptions options_); + @SharedPtr private native void allocate(@Const @ByRef LocalResponseNormOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java index 58e296a8ed6..738e7b47f2f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class LocalResponseNormImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public LocalResponseNormImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LocalResponseNormImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(LocalResponseNormImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplModuleHolder.java deleted file mode 100644 index 4b3164c040b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LocalResponseNormImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LocalResponseNormImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public LocalResponseNormImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public LocalResponseNormImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") LocalResponseNormImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") LocalResponseNormImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native LocalResponseNormImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormOptions.java index 4a012d02213..a2dd1b197eb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoid.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoid.java deleted file mode 100644 index dc76323589e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoid.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code LogSigmoidImpl}. - * See the documentation for {@code LogSigmoidImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LogSigmoid extends LogSigmoidImplModuleHolder { - static { Loader.load(); } - - public LogSigmoid(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public LogSigmoid(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSigmoidImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSigmoidImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LogSigmoid(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java index 3678f35e745..b9ac75d9bae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java index cad2f6b2a14..44e6697c45b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class LogSigmoidImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LogSigmoidImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LogSigmoidImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(LogSigmoidImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
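The asModule() change in the hunk above replaces a plain static_cast of a raw pointer with SHARED_PTR_NAMESPACE::static_pointer_cast over Cloneable<LogSigmoidImpl>, so the upcast to torch::nn::Module now shares ownership with the Java-side object instead of aliasing it. A sketch of both calls, assuming the no-argument constructor that these presets normally generate for option-less modules such as LogSigmoidImpl:

    import org.bytedeco.pytorch.*;

    public class AsModuleDemo {
        public static void main(String[] args) {
            LogSigmoidImpl act = new LogSigmoidImpl();  // option-less module; assumed no-arg ctor
            Module base = act.asModule();               // ownership-preserving upcast via static_pointer_cast
            Module copy = act.clone();                  // recursive deep copy, per the Javadoc above
            System.out.println(base.isNull() ? "upcast failed" : "upcast ok");
        }
    }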
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplModuleHolder.java deleted file mode 100644 index b9f8b4ab5fb..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LogSigmoidImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LogSigmoidImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public LogSigmoidImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public LogSigmoidImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSigmoidImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSigmoidImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") LogSigmoidImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") LogSigmoidImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) LogSigmoidImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native LogSigmoidImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmax.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmax.java deleted file mode 100644 index b9e65488d05..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmax.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code LogSoftmaxImpl}. - * See the documentation for {@code LogSoftmaxImpl} class to learn what methods it - * provides, and examples of how to use {@code LogSoftmax} with - * {@code torch::nn::LogSoftmaxOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LogSoftmax extends LogSoftmaxImplModuleHolder { - static { Loader.load(); } - - public LogSoftmax(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public LogSoftmax(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public LogSoftmax(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxFuncOptions.java index 8ab86ed9287..ea8d7e6fd93 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java index 75a6201ce67..de8a84fa2e7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class LogSoftmaxImpl extends LogSoftmaxImplCloneable { public LogSoftmaxImpl(Pointer p) { super(p); } public LogSoftmaxImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); } - @NoDeallocator private native void allocate(@Cast("int64_t") long dim); + @SharedPtr private native void allocate(@Cast("int64_t") long dim); public LogSoftmaxImpl(@Const @ByRef LogSoftmaxOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef LogSoftmaxOptions options_); + @SharedPtr private native void allocate(@Const @ByRef LogSoftmaxOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java index 550f5c11cb0..1e34f4086ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class LogSoftmaxImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public LogSoftmaxImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr LogSoftmaxImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(LogSoftmaxImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplModuleHolder.java deleted file mode 100644 index 01b7b1a92ce..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class LogSoftmaxImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LogSoftmaxImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public LogSoftmaxImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public LogSoftmaxImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") LogSoftmaxImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") LogSoftmaxImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native LogSoftmaxImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxOptions.java index 7f03a47f6df..3e73afe5e33 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java index 31ea4f49668..3d957c2bb04 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,6 +22,15 @@ public class LongArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LongArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
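The deletion above is representative: every generated *ImplModuleHolder wrapper is dropped by this refactor, and the Impl classes now stand in for the module directly, with the static asModule() bridge from the Cloneable diff performing the shared_ptr upcast the holder used to provide. A hedged sketch of the replacement pattern (the name() call is an assumed binding of torch::nn::Module::name(), not shown in this patch):

    import org.bytedeco.pytorch.*;

    public class AsModuleSketch {
        public static void main(String[] args) {
            LogSoftmaxImpl impl = new LogSoftmaxImpl(1);
            Module generic = impl.asModule();             // replaces the removed holder's ptr()
            System.out.println(generic.name().getString()); // assumed Module::name() binding
        }
    }

The LongArrayRef diff whose header begins above continues below.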
*/ + public LongArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public LongArrayRef position(long position) { + return (LongArrayRef)super.position(position); + } + @Override public LongArrayRef getPointer(long i) { + return new LongArrayRef((Pointer)this).offsetAddress(i); + } /** \name Constructors * \{ @@ -30,8 +41,7 @@ public class LongArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public LongArrayRef(@Cast("const int64_t") long OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Cast("const int64_t") long OneElt); + /** Construct an ArrayRef from a pointer and length. */ public LongArrayRef(@Cast("const int64_t*") LongPointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -57,6 +67,8 @@ public class LongArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. + public LongArrayRef(@ByRef LongVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef LongVector vec); /** Construct an ArrayRef from a std::array */ @@ -69,13 +81,13 @@ public class LongArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @Cast("const c10::ArrayRef::iterator") long begin(); - public native @Cast("const c10::ArrayRef::iterator") long end(); + public native @Const LongPointer begin(); + public native @Const LongPointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @Cast("const c10::ArrayRef::const_iterator") long cbegin(); - public native @Cast("const c10::ArrayRef::const_iterator") long cend(); + public native @Const LongPointer cbegin(); + public native @Const LongPointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); @@ -92,14 +104,14 @@ public class LongArrayRef extends Pointer { public native @Cast("const int64_t") long back(); /** equals - Check for element-wise equality. */ - public native @Cast("const bool") boolean equals(@ByVal @Cast("c10::ArrayRef*") LongArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal LongArrayRef RHS); public native @Cast("const bool") boolean equals(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... RHS); /** slice(n, m) - Take M elements of the array starting at element N */ - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + public native @Const @ByVal LongArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array. 
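Two themes of the refactor meet here: the ArrayRef methods above lose their opaque @Cast annotations in favor of typed LongPointer and LongArrayRef signatures, and the Optional wrapper hunk that follows adds a reset() method. A small hypothetical sketch exercising both (the element accessor get(long) is the usual operator[] binding, assumed from the presets):

    import org.bytedeco.javacpp.*;
    import org.bytedeco.pytorch.*;

    public class ArrayRefSketch {
        public static void main(String[] args) {
            LongPointer data = new LongPointer(new long[] {3, 224, 224});
            LongArrayRef shape = new LongArrayRef(data, 3);   // pointer+length ctor shown above
            for (long i = 0; i < shape.size(); i++) {
                System.out.println(shape.get(i));             // assumed operator[] binding
            }
            LongArrayRefOptional maybe = new LongArrayRefOptional(shape);
            maybe.reset();                                    // new in this patch
            System.out.println(maybe.has_value());            // -> false
        }
    }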
*/ - public native @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef slice(@Cast("size_t") long N); + public native @Const @ByVal LongArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java index 8196f5d47e3..d5aeec4c537 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongArrayRefOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,15 +22,16 @@ public class LongArrayRefOptional extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public LongArrayRefOptional(Pointer p) { super(p); } - public LongArrayRefOptional(@Cast("c10::ArrayRef<int64_t>*") LongArrayRef value) { this(); put(value); } + public LongArrayRefOptional(LongArrayRef value) { this(); put(value); } public LongArrayRefOptional(@Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... value) { this(); put(value); } public LongArrayRefOptional() { allocate(); } private native void allocate(); public native @Name("operator =") @ByRef LongArrayRefOptional put(@ByRef LongArrayRefOptional x); public native boolean has_value(); - public native @Name("value") @ByRef @Cast("c10::ArrayRef<int64_t>*") LongArrayRef get(); - @ValueSetter public native LongArrayRefOptional put(@ByRef @Cast("c10::ArrayRef<int64_t>*") LongArrayRef value); + public native void reset(); + public native @Name("value") @ByRef LongArrayRef get(); + @ValueSetter public native LongArrayRefOptional put(@ByRef LongArrayRef value); @ValueSetter public native LongArrayRefOptional put(@ByRef @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongElementReference.java new file mode 100644 index 00000000000..7a77261b758 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongElementReference.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + ++@Name("c10::impl::ListElementReference<int64_t,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LongElementReference extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public LongElementReference(Pointer p) { super(p); } + + public native @Name("operator std::conditional_t<std::is_reference<typename c10::detail::ivalue_to_const_ref_overload_return<int64_t>::type>::value,const int64_t&,int64_t>") long getLong(); + + + + + + // assigning another ref to this assigns the underlying value + + + public native @Const @ByRef IValue get(); + + + + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongExpandingArrayOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongExpandingArrayOptional.java index 203975a9d0c..ba3e6777ca4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongExpandingArrayOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongExpandingArrayOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class LongExpandingArrayOptional extends Pointer { public native @Name("operator =") @ByRef LongExpandingArrayOptional put(@ByRef LongExpandingArrayOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @Cast("torch::ExpandingArray<1>*") @ByRef LongPointer get(); @ValueSetter public native LongExpandingArrayOptional put(@Cast("torch::ExpandingArray<1>*") @ByRef LongPointer value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java new file mode 100644 index 00000000000..33e4be27673 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongList.java @@ -0,0 +1,241 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::List<int64_t>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LongList extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public LongList(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public LongList(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public LongList position(long position) { + return (LongList)super.position(position); + } + @Override public LongList getPointer(long i) { + return new LongList((Pointer)this).offsetAddress(i); + } + + + /** + * Constructs an empty list. + */ + public LongList() { super((Pointer)null); allocate(); } + private native void allocate(); + + /** + * Constructs a list with some initial values. 
+ * Example: + * List a({2, 3, 4}); + */ + public LongList(@ByVal LongArrayRef initial_values) { super((Pointer)null); allocate(initial_values); } + private native void allocate(@ByVal LongArrayRef initial_values); + public LongList(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... initial_values) { super((Pointer)null); allocate(initial_values); } + private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... initial_values); + + /** + * Create a generic list with runtime type information. + * This only works for c10::impl::GenericList and is not part of the public API + * but only supposed to be used internally by PyTorch. + */ + + + public LongList(@Const @ByRef LongList arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef LongList arg0); + public native @ByRef @Name("operator =") LongList put(@Const @ByRef LongList arg0); + + /** + * Create a new List pointing to a deep copy of the same data. + * The List returned is a new list with separate storage. + * Changes in it are not reflected in the original list or vice versa. + */ + public native @ByVal LongList copy(); + + /** + * Returns the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + */ + public native long get(long pos); + + /** + * Moves out the element at the specified location pos and returns it, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * The list contains an invalid element at position pos afterwards. Any operations + * on it before re-setting it are invalid. + */ + public native long extract(long pos); + + /** + * Returns a reference to the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * + * You cannot store the reference, but you can read it and assign new values to it: + * + * List list = ...; + * list[2] = 5; + * int64_t v = list[1]; + */ + + + + + /** + * Assigns a new value to the element at location pos. + */ + public native void set(long pos, long value); + + /** + * Assigns a new value to the element at location pos. + */ + + /** + * Returns an iterator to the first element of the container. + * If the container is empty, the returned iterator will be equal to end(). + */ + public native @ByVal @Cast("c10::List::iterator*") LongListIterator begin(); + + /** + * Returns an iterator to the element following the last element of the container. + * This element acts as a placeholder; attempting to access it results in undefined behavior. + */ + public native @ByVal @Cast("c10::List::iterator*") LongListIterator end(); + + /** + * Checks if the container has no elements. + */ + public native @Cast("bool") boolean empty(); + + /** + * Returns the number of elements in the container + */ + public native long size(); + + /** + * Increase the capacity of the vector to a value that's greater or equal to new_cap. + */ + public native void reserve(long new_cap); + + /** + * Erases all elements from the container. After this call, size() returns zero. + * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated. + */ + public native void clear(); + + /** + * Inserts value before pos. 
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") LongListIterator insert(@ByVal @Cast("c10::List::iterator*") LongListIterator pos, @Cast("const int64_t") long value); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Inserts a new element into the container directly before pos. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void push_back(@Cast("const int64_t") long value); + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given list to the end of the container. Uses at most one memory allocation. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void append(@ByVal LongList lst); + + /** + * Appends the given element value to the end of the container. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Removes the element at pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") LongListIterator erase(@ByVal @Cast("c10::List::iterator*") LongListIterator pos); + + /** + * Removes the elements in the range [first, last). + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") LongListIterator erase(@ByVal @Cast("c10::List::iterator*") LongListIterator first, @ByVal @Cast("c10::List::iterator*") LongListIterator last); + + /** + * Removes the last element of the container. + * Calling pop_back on an empty container is undefined. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void pop_back(); + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional default-inserted elements are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void resize(long count); + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional copies of value are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. 
*/ + public native void resize(long count, @Cast("const int64_t") long value); + + /** + * Value equality comparison. This function implements Python-like semantics for + * equality: two lists with the same identity (e.g. same pointer) trivially + * compare equal, otherwise each element is compared for equality. + */ + + + + + /** + * Identity comparison. Returns true if and only if {@code rhs} represents the same + * List object as {@code this}. + */ + public native @Cast("bool") boolean is(@Const @ByRef LongList rhs); + + public native @ByVal @Cast("std::vector<int64_t>*") LongVector vec(); + + /** + * Returns the number of Lists currently pointing to this same list. + * If this is the only instance pointing to this list, returns 1. + */ + // TODO Test use_count + public native @Cast("size_t") long use_count(); + + public native @ByVal Type.TypePtr elementType(); + + // See [unsafe set type] for why this exists. + public native void unsafeSetElementType(@ByVal Type.TypePtr t); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongListIterator.java new file mode 100644 index 00000000000..1d7181a32dd --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongListIterator.java @@ -0,0 +1,84 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::impl::ListIterator<int64_t,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LongListIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public LongListIterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
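Since c10::List<int64_t> is newly mapped here, together with the LongListIterator class whose diff begins above and continues below, a short hypothetical usage sketch may help; every call below appears in the generated signatures shown in this patch:

    import org.bytedeco.pytorch.*;

    public class LongListSketch {
        public static void main(String[] args) {
            LongList list = new LongList(2, 3, 4);        // varargs constructor
            list.push_back(5);                            // append
            list.set(0, 1);                               // assign at position 0
            System.out.println(list.get(2));              // bounds-checked read -> 4
            long n = list.end().subtract(list.begin());   // iterator distance, per the
            System.out.println(n == list.size());         // iterator diff below -> true
            LongVector copy = list.vec();                 // copy out as std::vector<int64_t>
        }
    }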
*/ + public LongListIterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public LongListIterator position(long position) { + return (LongListIterator)super.position(position); + } + @Override public LongListIterator getPointer(long i) { + return new LongListIterator((Pointer)this).offsetAddress(i); + } + + // C++17 friendly std::iterator implementation + + public LongListIterator() { super((Pointer)null); allocate(); } + private native void allocate(); + + public LongListIterator(@Const @ByRef LongListIterator arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef LongListIterator arg0); + public native @ByRef @Name("operator =") LongListIterator put(@Const @ByRef LongListIterator arg0); + + public native @ByRef @Name("operator ++") LongListIterator increment(); + + public native @ByVal @Name("operator ++") LongListIterator increment(int arg0); + + public native @ByRef @Name("operator --") LongListIterator decrement(); + + public native @ByVal @Name("operator --") LongListIterator decrement(int arg0); + + public native @ByRef @Name("operator +=") LongListIterator addPut(long offset); + + public native @ByRef @Name("operator -=") LongListIterator subtractPut(long offset); + + public native @ByVal @Name("operator +") LongListIterator add(long offset); + + public native @ByVal @Name("operator -") LongListIterator subtract(long offset); + + private static native @Namespace @Cast("c10::impl::ListIterator::difference_type") @Name("operator -") long subtract(@Const @ByRef LongListIterator lhs, @Const @ByRef LongListIterator rhs); + public long subtract(LongListIterator rhs) { return subtract(this, rhs); } + + + + + + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef LongListIterator lhs, @Const @ByRef LongListIterator rhs); + public boolean equals(LongListIterator rhs) { return equals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef LongListIterator lhs, @Const @ByRef LongListIterator rhs); + public boolean notEquals(LongListIterator rhs) { return notEquals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef LongListIterator lhs, @Const @ByRef LongListIterator rhs); + public boolean lessThan(LongListIterator rhs) { return lessThan(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef LongListIterator lhs, @Const @ByRef LongListIterator rhs); + public boolean lessThanEquals(LongListIterator rhs) { return lessThanEquals(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef LongListIterator lhs, @Const @ByRef LongListIterator rhs); + public boolean greaterThan(LongListIterator rhs) { return greaterThan(this, rhs); } + + private static native @Namespace @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef LongListIterator lhs, @Const @ByRef LongListIterator rhs); + public boolean greaterThanEquals(LongListIterator rhs) { return greaterThanEquals(this, rhs); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptional.java index 0a99aaad579..485e726693c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptional.java 
@@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class LongOptional extends Pointer { public native @Name("operator =") @ByRef LongOptional put(@ByRef LongOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @Cast("int64_t") long get(); @ValueSetter public native LongOptional put(@Cast("int64_t") long value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValueArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalArrayRef.java similarity index 55% rename from pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValueArrayRef.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalArrayRef.java index fc16afd85c8..61effcb0bab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValueArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,40 +17,39 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SugaredValueArrayRef extends Pointer { +@Name("c10::ArrayRef >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LongOptionalArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SugaredValueArrayRef(Pointer p) { super(p); } + public LongOptionalArrayRef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SugaredValueArrayRef(long size) { super((Pointer)null); allocateArray(size); } + public LongOptionalArrayRef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); - @Override public SugaredValueArrayRef position(long position) { - return (SugaredValueArrayRef)super.position(position); + @Override public LongOptionalArrayRef position(long position) { + return (LongOptionalArrayRef)super.position(position); } - @Override public SugaredValueArrayRef getPointer(long i) { - return new SugaredValueArrayRef((Pointer)this).offsetAddress(i); + @Override public LongOptionalArrayRef getPointer(long i) { + return new LongOptionalArrayRef((Pointer)this).offsetAddress(i); } /** \name Constructors * \{

* Construct an empty ArrayRef. */ - /* implicit */ public SugaredValueArrayRef() { super((Pointer)null); allocate(); } + /* implicit */ public LongOptionalArrayRef() { super((Pointer)null); allocate(); } private native void allocate(); /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public SugaredValueArrayRef(@Const @SharedPtr @ByRef SugaredValue OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @SharedPtr @ByRef SugaredValue OneElt); + /** Construct an ArrayRef from a pointer and length. */ - public SugaredValueArrayRef(@Const @SharedPtr SugaredValue data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } - private native void allocate(@Const @SharedPtr SugaredValue data, @Cast("size_t") long length); + public LongOptionalArrayRef(@Const LongOptional data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Const LongOptional data, @Cast("size_t") long length); /** Construct an ArrayRef from a range. */ - public SugaredValueArrayRef(@Const @SharedPtr SugaredValue begin, @Const @SharedPtr SugaredValue end) { super((Pointer)null); allocate(begin, end); } - private native void allocate(@Const @SharedPtr SugaredValue begin, @Const @SharedPtr SugaredValue end); + public LongOptionalArrayRef(@Const LongOptional begin, @Const LongOptional end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Const LongOptional begin, @Const LongOptional end); /** Construct an ArrayRef from a SmallVector. This is templated in order to * avoid instantiating SmallVectorTemplateCommon whenever we @@ -70,46 +71,46 @@ public class SugaredValueArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @Const @SharedPtr @ByVal SugaredValue begin(); - public native @Const @SharedPtr @ByVal SugaredValue end(); + public native @Const @ByPtr LongOptional begin(); + public native @Const @ByPtr LongOptional end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @Const @SharedPtr @ByVal SugaredValue cbegin(); - public native @Const @SharedPtr @ByVal SugaredValue cend(); + public native @Const @ByPtr LongOptional cbegin(); + public native @Const @ByPtr LongOptional cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); - public native @Const @SharedPtr SugaredValue data(); + public native @Const LongOptional data(); /** size - Get the array size. */ public native @Cast("const size_t") long size(); /** front - Get the first element. */ - public native @Const @SharedPtr @ByRef SugaredValue front(); + public native @Const @ByRef LongOptional front(); /** back - Get the last element. */ - public native @Const @SharedPtr @ByRef SugaredValue back(); + public native @Const @ByRef LongOptional back(); /** equals - Check for element-wise equality. */ - public native @Cast("const bool") boolean equals(@ByVal SugaredValueArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal LongOptionalArrayRef RHS); /** slice(n, m) - Take M elements of the array starting at element N */ - public native @Const @ByVal SugaredValueArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + public native @Const @ByVal LongOptionalArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array. 
*/ - public native @Const @ByVal SugaredValueArrayRef slice(@Cast("size_t") long N); + public native @Const @ByVal LongOptionalArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads * \{ */ - public native @Const @SharedPtr @ByRef @Name("operator []") SugaredValue get(@Cast("size_t") long Index); + public native @Const @ByRef @Name("operator []") LongOptional get(@Cast("size_t") long Index); /** Vector compatibility */ /// - public native @Const @SharedPtr @ByRef SugaredValue at(@Cast("size_t") long Index); + public native @Const @ByRef LongOptional at(@Cast("size_t") long Index); /** Disallow accidental assignment from a temporary. * @@ -126,7 +127,7 @@ public class SugaredValueArrayRef extends Pointer { /** \} * \name Expensive Operations * \{ */ - public native @ByVal SugaredValueVector vec(); + public native @ByVal LongOptionalVector vec(); /** \} */ } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalVector.java index b81cbbf6642..c24517ff23c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongOptionalVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class LongOptionalVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public LongOptional front() { return get(0); } + public LongOptional back() { return get(size() - 1); } @Index(function = "at") public native @ByRef LongOptional get(@Cast("size_t") long i); public native LongOptionalVector put(@Cast("size_t") long i, LongOptional value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymSmallVectorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorBase.java similarity index 56% rename from pytorch/src/gen/java/org/bytedeco/pytorch/SymSmallVectorBase.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorBase.java index 0f52bbef866..434271c4f69 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymSmallVectorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,13 +17,13 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("c10::SmallVectorTemplateBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SymSmallVectorBase extends Pointer { +@Name("c10::SmallVectorTemplateBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LongSmallVectorBase extends LongSmallVectorCommon { static { Loader.load(); } /** Pointer 
cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SymSmallVectorBase(Pointer p) { super(p); } + public LongSmallVectorBase(Pointer p) { super(p); } - public native void push_back(@Const @ByRef SymInt Elt); + public native void push_back(@Cast("const int64_t") long Elt); public native void pop_back(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorCommon.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorCommon.java new file mode 100644 index 00000000000..e89d42f9464 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorCommon.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::SmallVectorTemplateCommon") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LongSmallVectorCommon extends IntSizedSmallVectorBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public LongSmallVectorCommon(Pointer p) { super(p); } + + + // forward iterator creation methods. + public native @ByVal @Cast("c10::SmallVectorTemplateCommon::iterator*") LongPointer begin(); + public native @ByVal @Cast("c10::SmallVectorTemplateCommon::iterator*") LongPointer end(); + + // reverse iterator creation methods. + + public native long size_in_bytes(); + public native long max_size(); + + public native @Cast("size_t") long capacity_in_bytes(); + + /** Return a pointer to the vector's buffer, even if empty(). */ + public native @ByVal @Cast("c10::SmallVectorTemplateCommon::pointer*") LongPointer data(); + /** Return a pointer to the vector's buffer, even if empty(). */ + + // SmallVector::at is NOT from LLVM. 
+ public native long at(long idx); + public native @Name("operator []") long get(long idx); + + public native long front(); + + public native long back(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorImpl.java similarity index 73% rename from pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorImpl.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorImpl.java index fe2ca6e65c5..5d26672a5eb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DimVectorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongSmallVectorImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,14 +17,11 @@ import static org.bytedeco.pytorch.global.torch.*; - -/** This class consists of common code factored out of the SmallVector class to - * reduce code duplication based on the SmallVector 'N' template parameter. */ @Name("c10::SmallVectorImpl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DimVectorImpl extends SmallVectorBase { +public class LongSmallVectorImpl extends LongSmallVectorBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DimVectorImpl(Pointer p) { super(p); } + public LongSmallVectorImpl(Pointer p) { super(p); } @@ -40,21 +39,21 @@ public class DimVectorImpl extends SmallVectorBase { public native @Cast("int64_t") long pop_back_val(); - public native void swap(@ByRef DimVectorImpl RHS); + public native void swap(@ByRef LongSmallVectorImpl RHS); /** Add the specified range to the end of the SmallVector. */ /** Append \p NumInputs copies of \p Elt to the end. */ public native void append(long NumInputs, long Elt); - public native void append(@Const @ByRef DimVectorImpl RHS); + public native void append(@Const @ByRef LongSmallVectorImpl RHS); public native void assign(long NumElts, long Elt); // FIXME: Consider assigning over existing elements, rather than clearing & // re-initializing them - for all assign(...) variants. 
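The SmallVector hierarchy is reworked above: the SymInt- and DimVector-specific bases are renamed into a reusable Long* family (LongSmallVectorCommon, LongSmallVectorBase, LongSmallVectorImpl). A hedged sketch of that API surface; it assumes DimVector remains the concrete c10::SmallVector<int64_t> binding built on this hierarchy and keeps a default constructor, neither of which is shown in this excerpt:

    import org.bytedeco.pytorch.*;

    public class SmallVectorSketch {
        public static void main(String[] args) {
            DimVector dims = new DimVector();    // assumed concrete specialization
            dims.push_back(64);                  // LongSmallVectorBase.push_back(long)
            dims.push_back(128);
            System.out.println(dims.at(0));      // bounds-checked accessor -> 64
            System.out.println(dims.back());     // LongSmallVectorCommon.back() -> 128
        }
    }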
- public native void assign(@Const @ByRef DimVectorImpl RHS); + public native void assign(@Const @ByRef LongSmallVectorImpl RHS); public native @ByVal @Cast("c10::SmallVectorImpl::iterator*") LongPointer erase(@ByVal @Cast("c10::SmallVectorImpl::const_iterator*") LongPointer CI); @@ -63,10 +62,10 @@ public class DimVectorImpl extends SmallVectorBase { public native @ByVal @Cast("c10::SmallVectorImpl::iterator*") LongPointer insert(@ByVal @Cast("c10::SmallVectorImpl::iterator*") LongPointer I, long NumToInsert, long Elt); - public native @ByRef @Name("operator =") DimVectorImpl put(@Const @ByRef DimVectorImpl RHS); + public native @ByRef @Name("operator =") LongSmallVectorImpl put(@Const @ByRef LongSmallVectorImpl RHS); - - + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef LongSmallVectorImpl RHS); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef LongSmallVectorImpl RHS); - public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef DimVectorImpl RHS); + public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef LongSmallVectorImpl RHS); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java index 23c730f8baf..869e85d05ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongStringMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,8 @@ public class LongStringMap extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public BytePointer front() { return get(0); } + public BytePointer back() { return get(size() - 1); } @Index public native @StdString BytePointer get(@Cast("int64_t") long i); public native LongStringMap put(@Cast("int64_t") long i, BytePointer value); @ValueSetter @Index public native LongStringMap put(@Cast("int64_t") long i, @StdString String value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java index 62004a10a04..88f25ea2bd4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -25,8 +27,8 @@ public class LongVaryingShape extends Pointer { public LongVaryingShape(@Cast("const std::vector*") @ByRef LongVector vec) { super((Pointer)null); allocate(vec); } private native void allocate(@Cast("const std::vector*") @ByRef LongVector vec); - public LongVaryingShape(@ByVal @Cast("c10::ArrayRef*") 
LongArrayRef vec) { super((Pointer)null); allocate(vec); } - private native void allocate(@ByVal @Cast("c10::ArrayRef*") LongArrayRef vec); + public LongVaryingShape(@ByVal LongArrayRef vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByVal LongArrayRef vec); public LongVaryingShape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... vec) { super((Pointer)null); allocate(vec); } private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... vec); @@ -41,7 +43,7 @@ public class LongVaryingShape extends Pointer { public LongVaryingShape(@Cast("size_t") long size) { super((Pointer)null); allocate(size); } private native void allocate(@Cast("size_t") long size); - + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef LongVaryingShape other); public native @Const @ByRef @Name("operator []") LongOptional get(@Cast("size_t") long i); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVector.java index 0bd2bdb5182..5c04248ed6c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,6 +34,8 @@ public class LongVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public long front() { return get(0); } + public long back() { return get(size() - 1); } @Index(function = "at") public native @Cast("int64_t") long get(@Cast("size_t") long i); public native LongVector put(@Cast("size_t") long i, long value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java new file mode 100644 index 00000000000..75d2d27fa99 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorArrayRef.java @@ -0,0 +1,133 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::ArrayRef >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LongVectorArrayRef extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public LongVectorArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
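Alongside the ArrayRef cleanups, the std::vector wrappers pick up Java-side front() and back() helpers, as in the LongVector hunk above. A hypothetical sketch (the size constructor is the standard JavaCPP vector binding, assumed here):

    import org.bytedeco.pytorch.*;

    public class LongVectorSketch {
        public static void main(String[] args) {
            LongVector v = new LongVector(3);    // assumed std::vector size ctor
            v.put(0, 10).put(1, 20).put(2, 30);  // put(i, value), shown in the diff
            System.out.println(v.front());       // 10, shorthand for v.get(0)
            System.out.println(v.back());        // 30, shorthand for v.get(v.size() - 1)
        }
    }

The LongVectorArrayRef diff whose header begins above continues below.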
*/ + public LongVectorArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public LongVectorArrayRef position(long position) { + return (LongVectorArrayRef)super.position(position); + } + @Override public LongVectorArrayRef getPointer(long i) { + return new LongVectorArrayRef((Pointer)this).offsetAddress(i); + } + + /** \name Constructors + * \{ +

+ * Construct an empty ArrayRef. */ + /* implicit */ public LongVectorArrayRef() { super((Pointer)null); allocate(); } +private native void allocate(); + + /** Construct an ArrayRef from a single element. */ + // TODO Make this explicit + + + /** Construct an ArrayRef from a pointer and length. */ + public LongVectorArrayRef(@Cast("const std::vector*") LongVector data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Cast("const std::vector*") LongVector data, @Cast("size_t") long length); + + /** Construct an ArrayRef from a range. */ + public LongVectorArrayRef(@Cast("const std::vector*") LongVector begin, @Cast("const std::vector*") LongVector end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Cast("const std::vector*") LongVector begin, @Cast("const std::vector*") LongVector end); + + /** Construct an ArrayRef from a SmallVector. This is templated in order to + * avoid instantiating SmallVectorTemplateCommon whenever we + * copy-construct an ArrayRef. */ + + /** Construct an ArrayRef from a std::vector. */ + // The enable_if stuff here makes sure that this isn't used for + // std::vector, because ArrayRef can't work on a std::vector + // bitfield. + + /** Construct an ArrayRef from a std::array */ + + /** Construct an ArrayRef from a C array. */ + + /** Construct an ArrayRef from a std::initializer_list. */ + /* implicit */ + + /** \} + * \name Simple Operations + * \{ */ + + public native @Const @ByPtr LongVector begin(); + public native @Const @ByPtr LongVector end(); + + // These are actually the same as iterator, since ArrayRef only + // gives you const iterators. + public native @Const @ByPtr LongVector cbegin(); + public native @Const @ByPtr LongVector cend(); + + /** empty - Check if the array is empty. */ + public native @Cast("const bool") boolean empty(); + + public native @Cast("const std::vector*") LongVector data(); + + /** size - Get the array size. */ + public native @Cast("const size_t") long size(); + + /** front - Get the first element. */ + public native @Cast("const std::vector*") @ByRef LongVector front(); + + /** back - Get the last element. */ + public native @Cast("const std::vector*") @ByRef LongVector back(); + + /** equals - Check for element-wise equality. */ + public native @Cast("const bool") boolean equals(@ByVal LongVectorArrayRef RHS); + + /** slice(n, m) - Take M elements of the array starting at element N */ + public native @Const @ByVal LongVectorArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + + /** slice(n) - Chop off the first N elements of the array. */ + public native @Const @ByVal LongVectorArrayRef slice(@Cast("size_t") long N); + + /** \} + * \name Operator Overloads + * \{ */ + public native @Cast("const std::vector*") @ByRef @Name("operator []") LongVector get(@Cast("size_t") long Index); + + /** Vector compatibility */ + + /// + public native @Cast("const std::vector*") @ByRef LongVector at(@Cast("size_t") long Index); + + /** Disallow accidental assignment from a temporary. + * + * The declaration here is extra complicated so that "arrayRef = {}" + * continues to select the move assignment operator. */ + + + /** Disallow accidental assignment from a temporary. + * + * The declaration here is extra complicated so that "arrayRef = {}" + * continues to select the move assignment operator. 
*/ + + + /** \} + * \name Expensive Operations + * \{ */ + public native @Cast("std::vector<std::vector<int64_t> >*") @StdVector LongVector vec(); + + /** \} */ +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorOptional.java index 830404482d3..d182b12b81f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVectorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class LongVectorOptional extends Pointer { public native @Name("operator =") @ByRef LongVectorOptional put(@ByRef LongVectorOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @Cast("std::vector<int64_t>*") @ByRef LongVector get(); @ValueSetter public native LongVectorOptional put(@Cast("std::vector<int64_t>*") @ByRef LongVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/loss_reduction_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LossReduction.java similarity index 55% rename from pytorch/src/gen/java/org/bytedeco/pytorch/loss_reduction_t.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/LossReduction.java index 4884571a51d..b5409ddd906 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/loss_reduction_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LossReduction.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,25 +18,25 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant<torch::enumtype::kNone,torch::enumtype::kMean,torch::enumtype::kSum>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class loss_reduction_t extends Pointer { +public class LossReduction extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
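The rename that begins above replaces the C++-style loss_reduction_t name with the friendlier LossReduction for the kNone/kMean/kSum variant. A hedged sketch of the renamed class in use (it assumes the torch::enumtype singletons keep their default Java constructors):

    import org.bytedeco.pytorch.*;

    public class LossReductionSketch {
        public static void main(String[] args) {
            LossReduction reduction = new LossReduction(new kMean()); // assumed kMean() ctor
            kMean mean = reduction.get1();   // variant alternative 1, per the binding below
            reduction.put(new kSum());       // switch the active alternative
        }
    }

The LossReduction diff continues below.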
*/ - public loss_reduction_t(Pointer p) { super(p); } - public loss_reduction_t(kNone value) { this(); put(value); } - public loss_reduction_t(kMean value) { this(); put(value); } - public loss_reduction_t(kSum value) { this(); put(value); } - public loss_reduction_t() { allocate(); } + public LossReduction(Pointer p) { super(p); } + public LossReduction(kNone value) { this(); put(value); } + public LossReduction(kMean value) { this(); put(value); } + public LossReduction(kSum value) { this(); put(value); } + public LossReduction() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef loss_reduction_t put(@ByRef loss_reduction_t x); + public native @Name("operator =") @ByRef LossReduction put(@ByRef LossReduction x); public @ByRef kNone get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kNone get0(@ByRef loss_reduction_t container); - @ValueSetter public native loss_reduction_t put(@ByRef kNone value); + @Namespace @Name("c10::get<0>") public static native @ByRef kNone get0(@ByRef LossReduction container); + @ValueSetter public native LossReduction put(@ByRef kNone value); public @ByRef kMean get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kMean get1(@ByRef loss_reduction_t container); - @ValueSetter public native loss_reduction_t put(@ByRef kMean value); + @Namespace @Name("c10::get<1>") public static native @ByRef kMean get1(@ByRef LossReduction container); + @ValueSetter public native LossReduction put(@ByRef kMean value); public @ByRef kSum get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kSum get2(@ByRef loss_reduction_t container); - @ValueSetter public native loss_reduction_t put(@ByRef kSum value); + @Namespace @Name("c10::get<2>") public static native @ByRef kSum get2(@ByRef LossReduction container); + @ValueSetter public native LossReduction put(@ByRef kSum value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNIST.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNIST.java index dabfae2a994..65c76d47138 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNIST.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNIST.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java index e8bf1835e04..fd972737358 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java index 4a06e9dc969..044a0551e2c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java index b3e855b02b3..102b36a8934 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java index 916177cf312..a58420e1589 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java index 0867a08e7c7..2ca2c38e1a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoader.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java index 823f6777dcb..d300ca79ebd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTRandomDataLoaderBase.java @@ -1,10 +1,12 
@@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -51,5 +53,4 @@ public class MNISTRandomDataLoaderBase extends Pointer { public native void join(); /** Returns the options with which the DataLoader was configured. */ - public native @Const @ByRef @NoException(true) FullDataLoaderOptions options(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/_Uninitialized.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksArgs.java similarity index 61% rename from pytorch/src/gen/java/org/bytedeco/pytorch/_Uninitialized.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksArgs.java index 23c6ba65ebd..84d5123e32c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/_Uninitialized.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksArgs.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,14 +18,10 @@ import static org.bytedeco.pytorch.global.torch.*; -/** - * Destructor for non-fundamental types. - */ - -@Namespace("caffe2::detail") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class _Uninitialized extends Pointer { +@Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class MPSHooksArgs extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public _Uninitialized() { super((Pointer)null); } + public MPSHooksArgs() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public _Uninitialized(Pointer p) { super(p); } + public MPSHooksArgs(Pointer p) { super(p); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java new file mode 100644 index 00000000000..8be7bcf388e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java @@ -0,0 +1,60 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class MPSHooksInterface extends Pointer { + static { Loader.load(); } + /** Default native constructor. 
*/ + public MPSHooksInterface() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public MPSHooksInterface(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public MPSHooksInterface(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public MPSHooksInterface position(long position) { + return (MPSHooksInterface)super.position(position); + } + @Override public MPSHooksInterface getPointer(long i) { + return new MPSHooksInterface((Pointer)this).offsetAddress(i); + } + + + // Initialize the MPS library state + public native void initMPS(); + + public native @Cast("bool") boolean hasMPS(); + + public native @Cast("bool") boolean isOnMacOS13orNewer(); + + public native @Const @ByRef Generator getDefaultMPSGenerator(); + + public native Allocator getMPSDeviceAllocator(); + + public native void deviceSynchronize(); + + public native void emptyCache(); + + public native @Cast("size_t") long getCurrentAllocatedMemory(); + + public native @Cast("size_t") long getDriverAllocatedMemory(); + + public native void setMemoryFraction(double arg0); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELoss.java deleted file mode 100644 index f323e0ae53a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MSELossImpl}. - * See the documentation for {@code MSELossImpl} class to learn what methods it - * provides, and examples of how to use {@code MSELoss} with - * {@code torch::nn::MSELossOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MSELoss extends MSELossImplModuleHolder { - static { Loader.load(); } - - public MSELoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MSELoss(@SharedPtr @Cast({"", "std::shared_ptr"}) MSELossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MSELossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MSELoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java index ceac1bc7eb9..d5e5a345f5a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,9 +48,9 @@ public class MSELossImpl extends MSELossImplCloneable { } public MSELossImpl(@ByVal(nullValue = "torch::nn::MSELossOptions{}") MSELossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::MSELossOptions{}") MSELossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::MSELossOptions{}") MSELossOptions options_); public MSELossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java index c68a23e4140..11f7b86113d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MSELossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MSELossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MSELossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MSELossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplModuleHolder.java deleted file mode 100644 index c04ea1770fe..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MSELossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MSELossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MSELossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MSELossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MSELossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MSELossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MSELossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MSELossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MSELossImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native MSELossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossOptions.java index 5018164ed8c..5fcf542a082 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -48,5 +50,5 @@ public class MSELossOptions extends Pointer { private native void allocate(@ByVal kMean reduction); public MSELossOptions(@ByVal kSum reduction) { super((Pointer)null); allocate(reduction); } private native void allocate(@ByVal kSum reduction); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MagicMethod.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MagicMethod.java index dc8e53c75f5..fb2632b059d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MagicMethod.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MagicMethod.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -24,17 +26,12 @@ public class MagicMethod extends SugaredValue { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
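loss_reduction_t is renamed to LossReduction, and the options classes now return it from reduction(). A short sketch of reading and switching the active variant alternative, using only the accessors generated above:

    MSELossOptions opts = new MSELossOptions(new kMean());
    LossReduction r = opts.reduction();   // c10::variant<kNone, kMean, kSum>
    kMean mean = r.get1();                // alternative 1 is kMean
    r.put(new kSum());                    // make kSum the active alternative
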
*/ public MagicMethod(Pointer p) { super(p); } - public MagicMethod(@StdString BytePointer desugared_name, @SharedPtr @ByVal SugaredValue base) { super((Pointer)null); allocate(desugared_name, base); } - private native void allocate(@StdString BytePointer desugared_name, @SharedPtr @ByVal SugaredValue base); - public MagicMethod(@StdString String desugared_name, @SharedPtr @ByVal SugaredValue base) { super((Pointer)null); allocate(desugared_name, base); } - private native void allocate(@StdString String desugared_name, @SharedPtr @ByVal SugaredValue base); + public MagicMethod(@StdString BytePointer desugared_name, @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue base) { super((Pointer)null); allocate(desugared_name, base); } + private native void allocate(@StdString BytePointer desugared_name, @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue base); + public MagicMethod(@StdString String desugared_name, @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue base) { super((Pointer)null); allocate(desugared_name, base); } + private native void allocate(@StdString String desugared_name, @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue base); public native @StdString BytePointer kind(); - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MakeIndices.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MakeIndices.java deleted file mode 100644 index 160a218c769..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MakeIndices.java +++ /dev/null @@ -1,41 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// Partial specialization that forms our base case. When N is zero, we stop -// and define a typedef that will be visible to earlier classes due to -// inheritance. The typedef we define is an index list containing the numbers -// 0 through N-1. -@Name("torch::MakeIndices<0>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MakeIndices extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public MakeIndices() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public MakeIndices(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MakeIndices(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public MakeIndices position(long position) { - return (MakeIndices)super.position(position); - } - @Override public MakeIndices getPointer(long i) { - return new MakeIndices((Pointer)this).offsetAddress(i); - } - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLoss.java deleted file mode 100644 index b5e18bf7fba..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MarginRankingLossImpl}. - * See the documentation for {@code MarginRankingLossImpl} class to learn what - * methods it provides, and examples of how to use {@code MarginRankingLoss} with - * {@code torch::nn::MarginRankingLossOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MarginRankingLoss extends MarginRankingLossImplModuleHolder { - static { Loader.load(); } - - public MarginRankingLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MarginRankingLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MarginRankingLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java index 4f0df2635f9..d8f4d4fac7c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -49,9 +51,9 @@ public class MarginRankingLossImpl extends MarginRankingLossImplCloneable { } public MarginRankingLossImpl(@ByVal(nullValue = "torch::nn::MarginRankingLossOptions{}") MarginRankingLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::MarginRankingLossOptions{}") MarginRankingLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::MarginRankingLossOptions{}") MarginRankingLossOptions options_); public MarginRankingLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java index e3d03ae332d..4172ab881df 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MarginRankingLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MarginRankingLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MarginRankingLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MarginRankingLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplModuleHolder.java deleted file mode 100644 index 4a723c85fbe..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MarginRankingLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MarginRankingLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MarginRankingLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MarginRankingLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MarginRankingLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MarginRankingLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native MarginRankingLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossOptions.java index 5763ff686a8..111eed6b4c4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,5 +46,5 @@ public class MarginRankingLossOptions extends Pointer { } public native @ByRef @NoException(true) DoublePointer margin(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MatchTypeReturn.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MatchTypeReturn.java index c9999c2d2e1..179596354f5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MatchTypeReturn.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MatchTypeReturn.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java index eac5704d455..d02b1c19cf2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -14,11 +16,33 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; - // namespace cuda -@Namespace("torch::jit") @Opaque @Properties(inherit = 
org.bytedeco.pytorch.presets.torch.class) + + +// Try to match a list of inputs and keyword 'attributes' to this +// schema. Return the flat list of positional inputs to the call or +// `c10::nullopt` on failure (`failure_messages` contains a good error +// report in this case) + +@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class MatchedSchema extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public MatchedSchema() { super((Pointer)null); } + static { Loader.load(); } + /** Default native constructor. */ + public MatchedSchema() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public MatchedSchema(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MatchedSchema(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public MatchedSchema position(long position) { + return (MatchedSchema)super.position(position); + } + @Override public MatchedSchema getPointer(long i) { + return new MatchedSchema((Pointer)this).offsetAddress(i); + } + + public native @ByRef ValueVector inputs(); public native MatchedSchema inputs(ValueVector setter); + public native @ByRef TypeVector return_types(); public native MatchedSchema return_types(TypeVector setter); + public native @ByRef @Cast("c10::OptNameList*") StringVectorOptional return_field_names(); public native MatchedSchema return_field_names(StringVectorOptional setter); + public native @StdString BytePointer schema_name(); public native MatchedSchema schema_name(BytePointer setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1d.java deleted file mode 100644 index 6da7600a499..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MaxPool1dImpl}. - * See the documentation for {@code MaxPool1dImpl} class to learn what methods it - * provides, and examples of how to use {@code MaxPool1d} with - * {@code torch::nn::MaxPool1dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. 
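MatchedSchema changes from an @Opaque placeholder to a concrete struct with field accessors. A minimal sketch of inspecting a match result, assuming the generated vector types expose size() and that @StdString BytePointer values are read with getString():

    static void describe(MatchedSchema m) {
        System.out.println("schema: " + m.schema_name().getString());
        System.out.println("positional inputs: " + m.inputs().size());
        System.out.println("return types: " + m.return_types().size());
    }
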
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxPool1d extends MaxPool1dImplModuleHolder { - static { Loader.load(); } - - public MaxPool1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MaxPool1d(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MaxPool1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java index 5702264a653..396420523b1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class MaxPool1dImpl extends MaxPool1dImplBase { public MaxPool1dImpl(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public MaxPool1dImpl(@Const @ByRef MaxPool1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxPool1dOptions options_); + private native void allocate(@Const @ByRef MaxPool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool1dImpl(Pointer p) { super(p); } @@ -45,5 +47,5 @@ public class MaxPool1dImpl extends MaxPool1dImplBase { /** Returns the outputs and the indices of the max values. * Useful for {@code torch::nn::MaxUnpool1d} later. 
*/ - public native @ByVal TensorTensorTuple forward_with_indices(@Const @ByRef Tensor input); + public native @ByVal T_TensorTensor_T forward_with_indices(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java index b5fb4e5b0da..aa0a1a47e80 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,9 +28,9 @@ public class MaxPool1dImplBase extends MaxPool1dImplCloneable { public MaxPool1dImplBase(Pointer p) { super(p); } public MaxPool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public MaxPool1dImplBase(@Const @ByRef MaxPool1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxPool1dOptions options_); + private native void allocate(@Const @ByRef MaxPool1dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java index 11c0033ea23..995afcd6dd3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MaxPool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MaxPool1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplModuleHolder.java deleted file mode 100644 index f2ce6e40240..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxPool1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MaxPool1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MaxPool1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MaxPool1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MaxPool1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MaxPool1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool1dImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native MaxPool1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dOptions.java index e2e01cbd263..3c60ecb524f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2d.java deleted file mode 100644 index b6ca57bcb79..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MaxPool2dImpl}. - * See the documentation for {@code MaxPool2dImpl} class to learn what methods it - * provides, and examples of how to use {@code MaxPool2d} with - * {@code torch::nn::MaxPool2dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxPool2d extends MaxPool2dImplModuleHolder { - static { Loader.load(); } - - public MaxPool2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MaxPool2d(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MaxPool2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java index 6083316cb8f..5cc4fbb4d54 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class MaxPool2dImpl extends MaxPool2dImplBase { public MaxPool2dImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public MaxPool2dImpl(@Const @ByRef MaxPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxPool2dOptions options_); + private native void allocate(@Const @ByRef MaxPool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool2dImpl(Pointer p) { super(p); } @@ -45,5 +47,5 @@ public class MaxPool2dImpl extends MaxPool2dImplBase { /** Returns the outputs and the indices of the max values. * Useful for {@code torch::nn::MaxUnpool2d} later. 
*/ - public native @ByVal TensorTensorTuple forward_with_indices(@Const @ByRef Tensor input); + public native @ByVal T_TensorTensor_T forward_with_indices(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java index d153ea7be2b..d1d7de44569 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class MaxPool2dImplBase extends MaxPool2dImplCloneable { public MaxPool2dImplBase(Pointer p) { super(p); } public MaxPool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public MaxPool2dImplBase(@Const @ByRef MaxPool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxPool2dOptions options_); + private native void allocate(@Const @ByRef MaxPool2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java index a9df0acdf6e..7af0d92b666 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MaxPool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MaxPool2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
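The pooling modules now return the generated tuple type T_TensorTensor_T instead of TensorTensorTuple from forward_with_indices(). A hedged sketch, assuming the tuple exposes get0()/get1() accessors like other tuple instantiations in these presets:

    static Tensor pooledIndices(MaxPool2dImpl pool, Tensor input) {
        T_TensorTensor_T out = pool.forward_with_indices(input);
        Tensor values  = out.get0();   // pooled outputs (get0() assumed)
        Tensor indices = out.get1();   // max indices, usable with MaxUnpool2d later
        return indices;
    }
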
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplModuleHolder.java deleted file mode 100644 index 4c1df90bdef..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxPool2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MaxPool2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MaxPool2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MaxPool2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MaxPool2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MaxPool2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool2dImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native MaxPool2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dOptions.java index 45667d1eacb..27dc6bd23d9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3d.java deleted file mode 100644 index a04eb2f2130..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MaxPool3dImpl}. - * See the documentation for {@code MaxPool3dImpl} class to learn what methods it - * provides, and examples of how to use {@code MaxPool3d} with - * {@code torch::nn::MaxPool3dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxPool3d extends MaxPool3dImplModuleHolder { - static { Loader.load(); } - - public MaxPool3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MaxPool3d(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MaxPool3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java index c51563edb48..b564461af18 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class MaxPool3dImpl extends MaxPool3dImplBase { public MaxPool3dImpl(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public MaxPool3dImpl(@Const @ByRef MaxPool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxPool3dOptions options_); + private native void allocate(@Const @ByRef MaxPool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool3dImpl(Pointer p) { super(p); } @@ -45,5 +47,5 @@ public class MaxPool3dImpl extends MaxPool3dImplBase { /** Returns the outputs and the indices of the max values. * Useful for {@code torch::nn::MaxUnpool3d} later. 
*/ - public native @ByVal TensorTensorTuple forward_with_indices(@Const @ByRef Tensor input); + public native @ByVal T_TensorTensor_T forward_with_indices(@Const @ByRef Tensor input); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java index 8f1ddb7ede9..55b12f2afc3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class MaxPool3dImplBase extends MaxPool3dImplCloneable { public MaxPool3dImplBase(Pointer p) { super(p); } public MaxPool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public MaxPool3dImplBase(@Const @ByRef MaxPool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxPool3dOptions options_); + private native void allocate(@Const @ByRef MaxPool3dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java index f69123412bc..1334c4ff6bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MaxPool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxPool3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxPool3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MaxPool3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplModuleHolder.java deleted file mode 100644 index 8f12a92b55f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxPool3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MaxPool3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MaxPool3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MaxPool3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MaxPool3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MaxPool3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool3dImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native MaxPool3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dOptions.java index 8c457862db1..4beb1138cae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1d.java deleted file mode 100644 index a2804e37c30..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MaxUnpool1dImpl}. - * See the documentation for {@code MaxUnpool1dImpl} class to learn what methods it - * provides, and examples of how to use {@code MaxUnpool1d} with - * {@code torch::nn::MaxUnpool1dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxUnpool1d extends MaxUnpool1dImplModuleHolder { - static { Loader.load(); } - - public MaxUnpool1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MaxUnpool1d(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MaxUnpool1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dFuncOptions.java index 37926571c41..de3dfeea043 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java index 301390671cf..3c1915babe4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class MaxUnpool1dImpl extends MaxUnpool1dImplBase { public MaxUnpool1dImpl(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public MaxUnpool1dImpl(@Const @ByRef MaxUnpool1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxUnpool1dOptions options_); + private native void allocate(@Const @ByRef MaxUnpool1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public MaxUnpool1dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java index 473bff5a9a9..84cb64e3af4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,9 +28,9 @@ public class MaxUnpool1dImplBase extends MaxUnpool1dImplCloneable { public MaxUnpool1dImplBase(Pointer p) { super(p); } public MaxUnpool1dImplBase(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size); public MaxUnpool1dImplBase(@Const @ByRef MaxUnpool1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxUnpool1dOptions options_); + private native void allocate(@Const @ByRef MaxUnpool1dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java index 62f40d33fa5..6c149986337 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MaxUnpool1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MaxUnpool1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplModuleHolder.java deleted file mode 100644 index 046318a25f8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxUnpool1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MaxUnpool1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MaxUnpool1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MaxUnpool1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MaxUnpool1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MaxUnpool1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native MaxUnpool1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dOptions.java index 9ef4849a6d5..87ab2b1e08c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2d.java deleted file mode 100644 index 74acf736baa..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MaxUnpool2dImpl}. - * See the documentation for {@code MaxUnpool2dImpl} class to learn what methods it - * provides, and examples of how to use {@code MaxUnpool2d} with - * {@code torch::nn::MaxUnpool2dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxUnpool2d extends MaxUnpool2dImplModuleHolder { - static { Loader.load(); } - - public MaxUnpool2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MaxUnpool2d(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MaxUnpool2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dFuncOptions.java index 7d4476ffd56..3c1b3ccb31e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java index 0465bfa0e5c..fa219490054 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class MaxUnpool2dImpl extends MaxUnpool2dImplBase { public MaxUnpool2dImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public MaxUnpool2dImpl(@Const @ByRef MaxUnpool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxUnpool2dOptions options_); + private native void allocate(@Const @ByRef MaxUnpool2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public MaxUnpool2dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java index 5ca75520b74..c285af93212 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class MaxUnpool2dImplBase extends MaxUnpool2dImplCloneable { public MaxUnpool2dImplBase(Pointer p) { super(p); } public MaxUnpool2dImplBase(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public MaxUnpool2dImplBase(@Const @ByRef MaxUnpool2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxUnpool2dOptions options_); + private native void allocate(@Const @ByRef MaxUnpool2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java index 047fcecccb8..fac762d4164 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MaxUnpool2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MaxUnpool2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplModuleHolder.java deleted file mode 100644 index c07ce095a11..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxUnpool2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MaxUnpool2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MaxUnpool2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MaxUnpool2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MaxUnpool2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MaxUnpool2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native MaxUnpool2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dOptions.java index 0ca4761c893..dc84d0ee442 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3d.java deleted file mode 100644 index 6b4feef48c9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MaxUnpool3dImpl}. - * See the documentation for {@code MaxUnpool3dImpl} class to learn what methods it - * provides, and examples of how to use {@code MaxUnpool3d} with - * {@code torch::nn::MaxUnpool3dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxUnpool3d extends MaxUnpool3dImplModuleHolder { - static { Loader.load(); } - - public MaxUnpool3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MaxUnpool3d(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MaxUnpool3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dFuncOptions.java index 189fc3abf95..e34d867e244 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java index 2d2ee12c793..3dfc5f0aa5d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,9 +37,9 @@ public class MaxUnpool3dImpl extends MaxUnpool3dImplBase { public MaxUnpool3dImpl(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public MaxUnpool3dImpl(@Const @ByRef MaxUnpool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxUnpool3dOptions options_); + private native void allocate(@Const @ByRef MaxUnpool3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public MaxUnpool3dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java index a8467994ced..7df4fb8f5bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class MaxUnpool3dImplBase extends MaxUnpool3dImplCloneable { public MaxUnpool3dImplBase(Pointer p) { super(p); } public MaxUnpool3dImplBase(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size); public MaxUnpool3dImplBase(@Const @ByRef MaxUnpool3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MaxUnpool3dOptions options_); + private native void allocate(@Const @ByRef MaxUnpool3dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java index 63bae211c29..93d4eb6ff57 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MaxUnpool3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MaxUnpool3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MaxUnpool3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MaxUnpool3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplModuleHolder.java deleted file mode 100644 index 27ec855b667..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaxUnpool3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MaxUnpool3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MaxUnpool3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MaxUnpool3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MaxUnpool3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MaxUnpool3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native MaxUnpool3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dOptions.java index 35a8cb41dd4..a5f24055fcf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsGenericImplTensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsGenericImplTensor.java new file mode 100644 index 00000000000..0bdbc06c2d1 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsGenericImplTensor.java @@ -0,0 +1,54 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** MaybeOwnedTraits describes how to borrow from T. Here is how we + * can implement borrowing from an arbitrary type T using a raw + * pointer to const: */ +@Name("c10::MaybeOwnedTraitsGenericImpl >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class MaybeOwnedTraitsGenericImplTensor extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public MaybeOwnedTraitsGenericImplTensor() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public MaybeOwnedTraitsGenericImplTensor(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public MaybeOwnedTraitsGenericImplTensor(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public MaybeOwnedTraitsGenericImplTensor position(long position) { + return (MaybeOwnedTraitsGenericImplTensor)super.position(position); + } + @Override public MaybeOwnedTraitsGenericImplTensor getPointer(long i) { + return new MaybeOwnedTraitsGenericImplTensor((Pointer)this).offsetAddress(i); + } + + + public static native @Const @SharedPtr("at::Tensor") Tensor createBorrow(@Const @SharedPtr("at::Tensor") @ByRef Tensor from); + + + + + + public static native @Const @SharedPtr("at::Tensor") @ByRef Tensor referenceFromBorrow(@SharedPtr("at::Tensor") @ByPtrRef Tensor borrow); + + public static native @Const @SharedPtr("at::Tensor") Tensor pointerFromBorrow(@SharedPtr("at::Tensor") @ByPtrRef Tensor borrow); + + public static native @Cast("bool") boolean debugBorrowIsValid(@SharedPtr("at::Tensor") @ByPtrRef Tensor borrow); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraits.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsTensor.java similarity index 74% rename from pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraits.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsTensor.java index bd3f8dd4fdf..ea178decbfb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraits.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaybeOwnedTraitsTensor.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,21 +18,21 @@ import static org.bytedeco.pytorch.global.torch.*; // namespace at @Name("c10::MaybeOwnedTraits") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MaybeOwnedTraits extends Pointer { +public class MaybeOwnedTraitsTensor extends Pointer { static { Loader.load(); } /** Default native constructor. */ - public MaybeOwnedTraits() { super((Pointer)null); allocate(); } + public MaybeOwnedTraitsTensor() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public MaybeOwnedTraits(long size) { super((Pointer)null); allocateArray(size); } + public MaybeOwnedTraitsTensor(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public MaybeOwnedTraits(Pointer p) { super(p); } + public MaybeOwnedTraitsTensor(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); - @Override public MaybeOwnedTraits position(long position) { - return (MaybeOwnedTraits)super.position(position); + @Override public MaybeOwnedTraitsTensor position(long position) { + return (MaybeOwnedTraitsTensor)super.position(position); } - @Override public MaybeOwnedTraits getPointer(long i) { - return new MaybeOwnedTraits((Pointer)this).offsetAddress(i); + @Override public MaybeOwnedTraitsTensor getPointer(long i) { + return new MaybeOwnedTraitsTensor((Pointer)this).offsetAddress(i); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatOptional.java index 9fbf846aac2..c6e0d2bd5ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class MemoryFormatOptional extends Pointer { public native @Name("operator =") @ByRef MemoryFormatOptional put(@ByRef MemoryFormatOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef MemoryFormat get(); @ValueSetter public native MemoryFormatOptional put(@ByRef MemoryFormat value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatType.java index 09c94290d3d..f52689465a5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormatType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormattEnumerationType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormattEnumerationType.java index 118fd7cc439..2f4ce1ee8c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormattEnumerationType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryFormattEnumerationType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryReportingInfoBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryReportingInfoBase.java index 38fefe512e2..4136db28795 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryReportingInfoBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MemoryReportingInfoBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java new file mode 100644 index 00000000000..f0aafab5c3f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java @@ -0,0 +1,129 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// Base class for all structured kernel classes. The set_output virtual +// method is varied depending whether or not the operator is +// functional/out/inplace, and could also be specialized for CPU/CUDA/etc +// (although presently it isn't). +// +// A notable subclass of this interface is TensorIteratorBase. +@Namespace("at::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class MetaBase extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public MetaBase(Pointer p) { super(p); } + + public native @Const @ByRef Tensor maybe_get_output(@Cast("int64_t") long output_idx); + + // Note: [set_output_*] + // See: https://github.com/pytorch/pytorch/issues/69813 + // Whenever defining the output properties in the META function of a + // structured kernel (what was usually done with `set_output`), use one of + // these 3 variants, instead. In order to decide which variant to use, check + // the following decision tree: + // + // - Can the kernel you are going to implement support output tensors + // with arbitrary strides? + // | + // -- YES: `set_output_raw_strided` + // | + // -- NO: Should the output tensor strides be contiguous? + // | + // -- YES: `set_output_contiguous` + // | + // -- NO: `set_output_strided` + // + // Use this function whenever the kernel requires specific strides for the + // output. If `strides` does not match the given output strides, proxy outputs + // will be created and passed to the IMPL function. 
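+ //
+ // A minimal Java-side sketch of the variants above (not from the generated
+ // sources): `meta` stands for a MetaBase obtained from a structured kernel
+ // (e.g. a TensorIteratorBase), and the sizes, strides, and `options` (a
+ // previously built TensorOptions) are hypothetical:
+ //
+ //   long[] sizes = {2, 3};
+ //   long[] strides = {1, 2}; // kernel demands column-major output
+ //   meta.set_output_strided(0, sizes, strides, options); // output 0 must use `strides`
+ //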
+ public native void set_output_strided( + @Cast("int64_t") long output_idx, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @ByVal TensorOptions options, + @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); + public native void set_output_strided( + @Cast("int64_t") long output_idx, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @ByVal TensorOptions options); + public native void set_output_strided( + @Cast("int64_t") long output_idx, + @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] strides, + @ByVal TensorOptions options, + @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); + public native void set_output_strided( + @Cast("int64_t") long output_idx, + @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] strides, + @ByVal TensorOptions options); + + // Use this function whenever the kernel knows how to handle arbitrary strided + // outputs. This function has the same behavior as the old `set_output`: it + // will only re-stride if the given output was resized. + public native void set_output_raw_strided( + @Cast("int64_t") long output_idx, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides_hint, + @ByVal TensorOptions options, + @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); + public native void set_output_raw_strided( + @Cast("int64_t") long output_idx, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides_hint, + @ByVal TensorOptions options); + public native void set_output_raw_strided( + @Cast("int64_t") long output_idx, + @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] strides_hint, + @ByVal TensorOptions options, + @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); + public native void set_output_raw_strided( + @Cast("int64_t") long output_idx, + @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] strides_hint, + @ByVal TensorOptions options); + + // Use this function if the kernel requires contiguous strides. + // Alias for `set_output_strided`, but with contiguous strides.
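+ //
+ // Sketch (same assumptions as the snippet above): when contiguous output
+ // strides are required, only the sizes are passed and the strides are derived:
+ //
+ //   meta.set_output_contiguous(0, sizes, options);
+ //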
+ public native void set_output_contiguous( + @Cast("int64_t") long output_idx, + @ByVal LongArrayRef sizes, + @ByVal TensorOptions options, + @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); + public native void set_output_contiguous( + @Cast("int64_t") long output_idx, + @ByVal LongArrayRef sizes, + @ByVal TensorOptions options); + public native void set_output_contiguous( + @Cast("int64_t") long output_idx, + @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes, + @ByVal TensorOptions options, + @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); + public native void set_output_contiguous( + @Cast("int64_t") long output_idx, + @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes, + @ByVal TensorOptions options); + + // Returns a reference to an undefined tensor if there is no presupplied + // output + public native @Const @ByRef Tensor maybe_get_output(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Method.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Method.java index 8efb8aa944e..efd61098104 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Method.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Method.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,8 +47,14 @@ public class Method extends IMethod { // interpreter that executes ops inline, one by one, on caller's thread. A // model can utilize async op, i.e. `fork`, to launch an asynchronous task // which will be launched on provided `taskLauncher`.
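+ // A hedged usage sketch (not part of the generated file): `method` and its
+ // inputs are hypothetical, and the snippet assumes the returned FuturePtr
+ // exposes the wait()/value() pair of c10::ivalue::Future:
+ //
+ //   FuturePtr future = method.run_async(new IValueVector(input));
+ //   future.wait(); // block until the interpreter finishes
+ //   IValue result = future.value();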
+ public native @ByVal FuturePtr run_async( + @ByVal IValueVector stack, + @Cast("const torch::jit::Kwargs*") @ByRef(nullValue = "torch::jit::Kwargs()") StringIValueMap kwargs, + @ByVal(nullValue = "torch::jit::TaskLauncher(at::launch)") @Cast("torch::jit::TaskLauncher*") Pointer taskLauncher); + public native @ByVal FuturePtr run_async( + @ByVal IValueVector stack); - public native @SharedPtr @ByVal Graph graph(); + public native @SharedPtr("torch::jit::Graph") @ByVal Graph graph(); public native @StdString BytePointer name(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MethodOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MethodOptional.java index 259646c7ee2..cd28a3bdc18 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MethodOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MethodOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class MethodOptional extends Pointer { public native @Name("operator =") @ByRef MethodOptional put(@ByRef MethodOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Method get(); @ValueSetter public native MethodOptional put(@ByRef Method value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MethodValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MethodValue.java index ddc00360c6a..cbc33650302 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MethodValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MethodValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,10 +34,5 @@ public class MethodValue extends SugaredValue { public native @StdString BytePointer kind(); - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction f, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Mish.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Mish.java deleted file mode 100644 index 000ad0eb0b1..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Mish.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static 
org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MishImpl}. - * See the documentation for {@code MishImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Mish extends MishImplModuleHolder { - static { Loader.load(); } - - public Mish(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Mish(@SharedPtr @Cast({"", "std::shared_ptr"}) MishImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MishImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Mish(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java index 05240c52310..005e5214978 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java index 808dc15d527..55915701e23 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MishImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MishImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MishImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MishImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
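 *
 * A sketch (hypothetical instance, not from the generated sources): with this
 * change {@code clone()} returns the copy by value as a
 * {@code std::shared_ptr<torch::nn::Module>}:
 * {@code
 * MishImpl mish = new MishImpl();
 * Module copy = mish.clone(); // deep copy, detached from `mish`
 * }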
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplModuleHolder.java deleted file mode 100644 index 3c5020b427f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MishImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MishImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MishImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MishImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MishImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MishImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MishImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MishImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MishImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native MishImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MobileCode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MobileCode.java index 126e059ec48..c53a2b2579a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MobileCode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MobileCode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,43 +25,43 @@ public class MobileCode extends Code { public MobileCode(Pointer p) { super(p); } public MobileCode( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name, @Cast("bool") boolean emit_default_input_instructions/*=true*/, @Cast("bool") boolean support_default_args_before_out/*=true*/, @Cast("bool") boolean emit_promoted_ops/*=true*/, @Cast("size_t") long remaining_bailout_depth/*=0*/) { super((Pointer)null); allocate(graph, function_name, emit_default_input_instructions, support_default_args_before_out, emit_promoted_ops, remaining_bailout_depth); } private native void allocate( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name, @Cast("bool") boolean emit_default_input_instructions/*=true*/, @Cast("bool") boolean support_default_args_before_out/*=true*/, @Cast("bool") boolean emit_promoted_ops/*=true*/, @Cast("size_t") long remaining_bailout_depth/*=0*/); public MobileCode( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name) { super((Pointer)null); allocate(graph, function_name); } private native void allocate( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString BytePointer function_name); public MobileCode( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name, @Cast("bool") boolean emit_default_input_instructions/*=true*/, @Cast("bool") boolean support_default_args_before_out/*=true*/, @Cast("bool") boolean emit_promoted_ops/*=true*/, @Cast("size_t") long remaining_bailout_depth/*=0*/) { super((Pointer)null); allocate(graph, function_name, emit_default_input_instructions, support_default_args_before_out, emit_promoted_ops, remaining_bailout_depth); } private native void allocate( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name, @Cast("bool") boolean emit_default_input_instructions/*=true*/, @Cast("bool") boolean support_default_args_before_out/*=true*/, 
@Cast("bool") boolean emit_promoted_ops/*=true*/, @Cast("size_t") long remaining_bailout_depth/*=0*/); public MobileCode( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name) { super((Pointer)null); allocate(graph, function_name); } private native void allocate( - @Const @SharedPtr @ByRef Graph graph, + @Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph, @StdString String function_name); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java index 1f001834dbd..2da0762f136 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -72,19 +74,20 @@ public class Module extends Pointer { @Override public Module getPointer(long i) { return new Module((Pointer)this).offsetAddress(i); } + public Module asModule() { return this; } /** Tells the base {@code Module} about the name of the submodule. */ public Module(@StdString BytePointer name) { super((Pointer)null); allocate(name); } - private native void allocate(@StdString BytePointer name); + @SharedPtr private native void allocate(@StdString BytePointer name); public Module(@StdString String name) { super((Pointer)null); allocate(name); } - private native void allocate(@StdString String name); + @SharedPtr private native void allocate(@StdString String name); /** Constructs the module without immediate knowledge of the submodule's name. * The name of the submodule is inferred via RTTI (if possible) the first * time {@code .name()} is invoked. */ public Module() { super((Pointer)null); allocate(); } - private native void allocate(); + @SharedPtr private native void allocate(); /** Returns the name of the {@code Module}. * @@ -98,7 +101,8 @@ public class Module extends Pointer { /// /// - public native @StdString @NoException(true) BytePointer name(); + public BytePointer name() { return asModule()._name(); } + private native @StdString @NoException(true) @Name("name") BytePointer _name(); /** Performs a recursive deep copy of the module and all its registered * parameters, buffers and submodules. @@ -118,9 +122,9 @@ public class Module extends Pointer { * \endrst */ /// - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module clone(); + public Module clone(DeviceOptional device) { return asModule()._clone(device); } + private native @SharedPtr("torch::nn::Module") @ByVal @Virtual(subclasses=false, method="clone") @Cast({"", "std::shared_ptr"}) @Const({false, false, true}) @Name("clone") Module _clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); /** Applies the {@code function} to the {@code Module} and recursively to every submodule. * The function must accept a {@code Module&}. 
@@ -134,7 +138,8 @@ public class Module extends Pointer { * \endrst */ /// - public native void apply(@Cast("const torch::nn::Module::ModuleApplyFunction*") @ByRef ModuleFunction function); + public void apply(ModuleApplyFunction function) { asModule()._apply(function); } + private native @Name("apply") void _apply(@Const @ByRef ModuleApplyFunction function); /** Applies the {@code function} to the {@code Module} and recursively to every submodule. * The function must accept a {@code const Module&}. @@ -162,13 +167,16 @@ public class Module extends Pointer { * \endrst */ /// - public native void apply( - @Cast("const torch::nn::Module::NamedModuleApplyFunction*") @ByRef Pointer function, + public void apply(NamedModuleApplyFunction function, BytePointer name_prefix) { asModule()._apply(function, name_prefix); } + private native @Name("apply") void _apply( + @Const @ByRef NamedModuleApplyFunction function, @StdString BytePointer name_prefix/*=std::string()*/); - public native void apply( - @Cast("const torch::nn::Module::NamedModuleApplyFunction*") @ByRef Pointer function); - public native void apply( - @Cast("const torch::nn::Module::NamedModuleApplyFunction*") @ByRef Pointer function, + public void apply(NamedModuleApplyFunction function) { asModule()._apply(function); } + private native @Name("apply") void _apply( + @Const @ByRef NamedModuleApplyFunction function); + public void apply(NamedModuleApplyFunction function, String name_prefix) { asModule()._apply(function, name_prefix); } + private native @Name("apply") void _apply( + @Const @ByRef NamedModuleApplyFunction function, @StdString String name_prefix/*=std::string()*/); /** Applies the {@code function} to the {@code Module} and recursively to every submodule. @@ -195,6 +203,10 @@ public native void apply( * std::cout << module->name() << std::endl; * }); * \endrst */ + + /// + public void apply(SharedModuleApplyFunction function) { asModule()._apply(function); } + private native @Name("apply") void _apply(@Cast("const torch::nn::Module::ModulePointerApplyFunction*") @ByRef SharedModuleApplyFunction function); /** Applies the {@code function} to the {@code Module} and recursively to every submodule. * The function must accept a {@code const std::string&} for the key of the module, @@ -211,28 +223,47 @@ public native void apply( * std::cout << key << ": " << module->name() << std::endl; * }); * \endrst */ + public void apply(NamedSharedModuleApplyFunction function, BytePointer name_prefix) { asModule()._apply(function, name_prefix); } + private native @Name("apply") void _apply( + @Const @ByRef NamedSharedModuleApplyFunction function, + @StdString BytePointer name_prefix/*=std::string()*/); + public void apply(NamedSharedModuleApplyFunction function) { asModule()._apply(function); } + private native @Name("apply") void _apply( + @Const @ByRef NamedSharedModuleApplyFunction function); + public void apply(NamedSharedModuleApplyFunction function, String name_prefix) { asModule()._apply(function, name_prefix); } + private native @Name("apply") void _apply( + @Const @ByRef NamedSharedModuleApplyFunction function, + @StdString String name_prefix/*=std::string()*/); /** Returns the parameters of this {@code Module} and if {@code recurse} is true, also * recursively of every submodule. 
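 *
 * Sketch (hypothetical module):
 * {@code TensorVector params = module.parameters();}
 * collects the parameters of the module and, since {@code recurse} defaults to
 * true, of every submodule as well.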
*/ - public native @Cast({"", "std::vector"}) @StdMove TensorVector parameters(@Cast("bool") boolean recurse/*=true*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector parameters(); + public TensorVector parameters(boolean recurse) { return asModule()._parameters(recurse); } + private native @Name("parameters") @Cast({"", "std::vector"}) @StdMove TensorVector _parameters(@Cast("bool") boolean recurse/*=true*/); + public TensorVector parameters() { return asModule()._parameters(); } + private native @Name("parameters") @Cast({"", "std::vector"}) @StdMove TensorVector _parameters(); /** Returns an {@code OrderedDict} with the parameters of this {@code Module} along with * their keys, and if {@code recurse} is true also recursively of every submodule. */ - public native @ByVal StringTensorDict named_parameters(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal StringTensorDict named_parameters(); + public StringTensorDict named_parameters(boolean recurse) { return asModule()._named_parameters(recurse); } + private native @ByVal @Name("named_parameters") StringTensorDict _named_parameters(@Cast("bool") boolean recurse/*=true*/); + public StringTensorDict named_parameters() { return asModule()._named_parameters(); } + private native @ByVal @Name("named_parameters") StringTensorDict _named_parameters(); /** Returns the buffers of this {@code Module} and if {@code recurse} is true, also * recursively of every submodule. */ - public native @Cast({"", "std::vector"}) @StdMove TensorVector buffers(@Cast("bool") boolean recurse/*=true*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector buffers(); + public TensorVector buffers(boolean recurse) { return asModule()._buffers(recurse); } + private native @Name("buffers") @Cast({"", "std::vector"}) @StdMove TensorVector _buffers(@Cast("bool") boolean recurse/*=true*/); + public TensorVector buffers() { return asModule()._buffers(); } + private native @Name("buffers") @Cast({"", "std::vector"}) @StdMove TensorVector _buffers(); /** Returns an {@code OrderedDict} with the buffers of this {@code Module} along with * their keys, and if {@code recurse} is true also recursively of every submodule. 
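 *
 * Sketch (hypothetical module; the generated {@code StringTensorDict} is the
 * binding of that {@code OrderedDict}):
 * {@code StringTensorDict buffers = module.named_buffers();}
 * where each item carries the buffer's key and its tensor.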
*/ /// - public native @ByVal StringTensorDict named_buffers(@Cast("bool") boolean recurse/*=true*/); - public native @ByVal StringTensorDict named_buffers(); + public StringTensorDict named_buffers(boolean recurse) { return asModule()._named_buffers(recurse); } + private native @ByVal @Name("named_buffers") StringTensorDict _named_buffers(@Cast("bool") boolean recurse/*=true*/); + public StringTensorDict named_buffers() { return asModule()._named_buffers(); } + private native @ByVal @Name("named_buffers") StringTensorDict _named_buffers(); /** Returns the submodules of this {@code Module} (the entire submodule hierarchy) * and if {@code include_self} is true, also inserts a {@code shared_ptr} to this module @@ -247,8 +278,10 @@ public native void apply( * \endrst */ /// - public native @ByVal SharedModuleVector modules(@Cast("bool") boolean include_self/*=true*/); - public native @ByVal SharedModuleVector modules(); + public SharedModuleVector modules(boolean include_self) { return asModule()._modules(include_self); } + private native @ByVal @Name("modules") SharedModuleVector _modules(@Cast("bool") boolean include_self/*=true*/); + public SharedModuleVector modules() { return asModule()._modules(); } + private native @ByVal @Name("modules") SharedModuleVector _modules(); /** Returns an {@code OrderedDict} of the submodules of this {@code Module} (the entire * submodule hierarchy) and their keys, and if {@code include_self} is true, also @@ -263,30 +296,36 @@ public native void apply( * this method with {@code include_self} set to false if your {@code Module} is not * stored in a {@code shared_ptr}. * \endrst */ - public native @ByVal StringSharedModuleDict named_modules( + public StringSharedModuleDict named_modules(BytePointer name_prefix, boolean include_self) { return asModule()._named_modules(name_prefix, include_self); } + private native @ByVal @Name("named_modules") StringSharedModuleDict _named_modules( @StdString BytePointer name_prefix/*=std::string()*/, @Cast("bool") boolean include_self/*=true*/); - public native @ByVal StringSharedModuleDict named_modules(); - public native @ByVal StringSharedModuleDict named_modules( + public StringSharedModuleDict named_modules() { return asModule()._named_modules(); } + private native @ByVal @Name("named_modules") StringSharedModuleDict _named_modules(); + public StringSharedModuleDict named_modules(String name_prefix, boolean include_self) { return asModule()._named_modules(name_prefix, include_self); } + private native @ByVal @Name("named_modules") StringSharedModuleDict _named_modules( @StdString String name_prefix/*=std::string()*/, @Cast("bool") boolean include_self/*=true*/); /** Returns the direct submodules of this {@code Module}. */ - public native @ByVal SharedModuleVector children(); + public SharedModuleVector children() { return asModule()._children(); } + private native @ByVal @Name("children") SharedModuleVector _children(); /** Returns an {@code OrderedDict} of the direct submodules of this {@code Module} and * their keys. */ - public native @ByVal StringSharedModuleDict named_children(); + public StringSharedModuleDict named_children() { return asModule()._named_children(); } + private native @ByVal @Name("named_children") StringSharedModuleDict _named_children(); /** Enables "training" mode. 
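 *
 * Sketch: {@code module.train(true);} enables training mode,
 * {@code module.eval();} switches back to evaluation mode, and
 * {@code module.is_training()} reports the current state.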
*/ - public native void train(@Cast("bool") boolean on/*=true*/); - public native void train(); + public void train(boolean on) { asModule()._train(on); } + private native @Virtual(subclasses=false, method="train") @Name("train") void _train(@Cast("bool") boolean on/*=true*/); /** Calls train(false) to enable "eval" mode. * Do not override this method, override {@code train()} instead. */ /// - public native void eval(); + public void eval() { asModule()._eval(); } + private native @Name("eval") void _eval(); /** True if the module is in training mode. * @@ -299,7 +338,8 @@ public native void apply( * depending on this property. */ /// - public native @Cast("bool") @NoException(true) boolean is_training(); + public boolean is_training() { return asModule()._is_training(); } + private native @Cast("bool") @Virtual(subclasses=false, method="is_training") @NoException(true) @Const({false, false, true}) @Name("is_training") boolean _is_training(); /** Recursively casts all parameters to the given {@code dtype} and {@code device}. * @@ -309,13 +349,11 @@ public native void apply( * effect. */ /// - public native void to( + public void to(Device device, ScalarType dtype, boolean non_blocking) { asModule()._to(device, dtype, non_blocking); } + private native @Virtual(subclasses=false, method="to") @Name("to") void _to( @ByVal Device device, ScalarType dtype, @Cast("bool") boolean non_blocking/*=false*/); - public native void to( - @ByVal Device device, - ScalarType dtype); /** Recursively casts all parameters to the given dtype. * @@ -325,8 +363,8 @@ public native void to( * effect. */ /// - public native void to(ScalarType dtype, @Cast("bool") boolean non_blocking/*=false*/); - public native void to(ScalarType dtype); + public void to(ScalarType dtype, boolean non_blocking) { asModule()._to(dtype, non_blocking); } + private native @Virtual(subclasses=false, method="to") @Name("to") void _to(ScalarType dtype, @Cast("bool") boolean non_blocking/*=false*/); /** Recursively moves all parameters to the given device. * @@ -334,16 +372,16 @@ public native void to( * destination is on the GPU or vice versa, the copy is performed * asynchronously with respect to the host. Otherwise, the argument has no * effect. */ - public native void to(@ByVal Device device, @Cast("bool") boolean non_blocking/*=false*/); - public native void to(@ByVal Device device); + public void to(Device device, boolean non_blocking) { asModule()._to(device, non_blocking); } + private native @Virtual(subclasses=false, method="to") @Name("to") void _to(@ByVal Device device, @Cast("bool") boolean non_blocking/*=false*/); /** Recursively zeros out the {@code grad} value of each registered parameter. */ /// /// /// - public native void zero_grad(@Cast("bool") boolean set_to_none/*=true*/); - public native void zero_grad(); + public void zero_grad(boolean set_to_none) { asModule()._zero_grad(set_to_none); } + private native @Virtual(subclasses=false, method="zero_grad") @Name("zero_grad") void _zero_grad(@Cast("bool") boolean set_to_none/*=true*/); /** Attempts to cast this {@code Module} to the given {@code ModuleType}. * @@ -361,10 +399,6 @@ public native void to( * MyModule module; * module->apply(initialize_weights); * \endrst */ - - /// - /// - public Module asModule() { return this; } /** Attempts to cast this {@code Module} to the given {@code ModuleType}. * @@ -422,7 +456,8 @@ public native void to( * {@code nn::Functional}), those submodules are skipped when serializing. 
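 *
 * A hedged round-trip sketch (hypothetical file name; assumes the
 * OutputArchive binding of these presets, whose {@code save_to} mirrors
 * {@code torch::serialize::OutputArchive::save_to}):
 * {@code
 * OutputArchive out = new OutputArchive();
 * module.save(out);
 * out.save_to("module.pt");
 * }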
*/ /// - public native void save(@ByRef OutputArchive archive); + public void save(OutputArchive archive) { asModule()._save(archive); } + private native @Virtual(subclasses=false, method="save") @Const({false, false, true}) @Name("save") void _save(@ByRef OutputArchive archive); /** Deserializes the {@code Module} from the given {@code InputArchive}. * @@ -431,7 +466,8 @@ public native void to( * {@code InputArchive} when deserializing. */ /// - public native void load(@ByRef InputArchive archive); + public void load(InputArchive archive) { asModule()._load(archive); } + private native @Virtual(subclasses=false, method="load") @Name("load") void _load(@ByRef InputArchive archive); /** Streams a pretty representation of the {@code Module} into the given {@code stream}. * By default, this representation will be the name of the module (taken from @@ -440,7 +476,8 @@ public native void to( * * Override this method to change the pretty print. The input * {@code stream} should be returned from the method, to allow easy chaining. */ - public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); + public void pretty_print(Pointer stream) { asModule()._pretty_print(stream); } + private native @Virtual(subclasses=false, method="pretty_print") @Const({false, false, true}) @Name("pretty_print") void _pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); /** Returns whether the {@code Module} is serializable. */ @@ -448,7 +485,8 @@ public native void to( /// /// /// - public native @Cast("bool") boolean is_serializable(); + public boolean is_serializable() { return asModule()._is_serializable(); } + private native @Cast("bool") @Virtual(subclasses=false, method="is_serializable") @Const({false, false, true}) @Name("is_serializable") boolean _is_serializable(); /** Registers a parameter with this {@code Module}. 
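 *
 * Sketch (hypothetical shape; assumes {@code randn} from
 * org.bytedeco.pytorch.global.torch):
 * {@code
 * Tensor weight = module.register_parameter("weight", torch.randn(new long[]{4, 4}));
 * }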
* @@ -471,18 +509,22 @@ public native void to( /// /// /// - public native @ByRef Tensor register_parameter( + public Tensor register_parameter(BytePointer name, Tensor tensor, boolean requires_grad) { return asModule()._register_parameter(name, tensor, requires_grad); } + private native @ByRef @Name("register_parameter") Tensor _register_parameter( @StdString BytePointer name, @ByVal Tensor tensor, @Cast("bool") boolean requires_grad/*=true*/); - public native @ByRef Tensor register_parameter( + public Tensor register_parameter(BytePointer name, Tensor tensor) { return asModule()._register_parameter(name, tensor); } + private native @ByRef @Name("register_parameter") Tensor _register_parameter( @StdString BytePointer name, @ByVal Tensor tensor); - public native @ByRef Tensor register_parameter( + public Tensor register_parameter(String name, Tensor tensor, boolean requires_grad) { return asModule()._register_parameter(name, tensor, requires_grad); } + private native @ByRef @Name("register_parameter") Tensor _register_parameter( @StdString String name, @ByVal Tensor tensor, @Cast("bool") boolean requires_grad/*=true*/); - public native @ByRef Tensor register_parameter( + public Tensor register_parameter(String name, Tensor tensor) { return asModule()._register_parameter(name, tensor); } + private native @ByRef @Name("register_parameter") Tensor _register_parameter( @StdString String name, @ByVal Tensor tensor); @@ -503,8 +545,10 @@ public native void to( /// /// /// - public native @ByRef Tensor register_buffer(@StdString BytePointer name, @ByVal Tensor tensor); - public native @ByRef Tensor register_buffer(@StdString String name, @ByVal Tensor tensor); + public Tensor register_buffer(BytePointer name, Tensor tensor) { return asModule()._register_buffer(name, tensor); } + private native @ByRef @Name("register_buffer") Tensor _register_buffer(@StdString BytePointer name, @ByVal Tensor tensor); + public Tensor register_buffer(String name, Tensor tensor) { return asModule()._register_buffer(name, tensor); } + private native @ByRef @Name("register_buffer") Tensor _register_buffer(@StdString String name, @ByVal Tensor tensor); /** Registers a submodule with this {@code Module}. 
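 *
 * With this refactoring the long list of per-type overloads below collapses
 * into the single generic variant taking any {@code Module}; a sketch
 * (hypothetical submodule):
 * {@code
 * LinearImpl linear = new LinearImpl(3, 4);
 * module.register_module("linear", linear);
 * }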
* @@ -523,2596 +567,29 @@ public native void to( /// /// /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Module register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Module module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Module register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Module module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ModuleDictImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ModuleDictImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ModuleDictImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ModuleDictImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ModuleListImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ModuleListImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ModuleListImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ModuleListImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SequentialImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SequentialImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SequentialImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SequentialImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ParameterDictImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ParameterDictImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ParameterDictImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ParameterDictImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ParameterListImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ParameterListImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ParameterListImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ParameterListImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", 
"std::shared_ptr"}) BatchNorm1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Conv1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Conv1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) DropoutImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) DropoutImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) DropoutImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) DropoutImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Conv2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Conv2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl module); - 
public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Dropout2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Dropout2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Dropout2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Dropout2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Conv3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Conv3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Dropout3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Dropout3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Dropout3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Dropout3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl register_module( - @StdString 
BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) EmbeddingImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) EmbeddingImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FoldImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) FoldImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FoldImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) FoldImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UnfoldImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) UnfoldImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UnfoldImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) UnfoldImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) IdentityImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) IdentityImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) IdentityImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) IdentityImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") 
@Cast({"", "std::shared_ptr"}) LinearImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LinearImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LinearImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LinearImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BilinearImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BilinearImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BilinearImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BilinearImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FlattenImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) FlattenImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FlattenImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) FlattenImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UnflattenImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) UnflattenImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UnflattenImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) UnflattenImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) L1LossImpl register_module( + public Module register_module(BytePointer name, Module module) { return asModule()._register_module(name, module.asModule()); } + private native @SharedPtr("torch::nn::Module") @ByVal @Name("register_module") Module _register_module( @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) L1LossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) L1LossImpl register_module( + @SharedPtr("torch::nn::Module") @ByVal Module module); + public Module register_module(String name, Module module) { return asModule()._register_module(name, module.asModule()); } + private native @SharedPtr("torch::nn::Module") @ByVal @Name("register_module") Module _register_module( @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) L1LossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) KLDivLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) KLDivLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) KLDivLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) KLDivLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MSELossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MSELossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MSELossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MSELossImpl module); - - /// - /// - /// - /// - public native 
@SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BCELossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BCELossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BCELossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BCELossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HuberLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) HuberLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HuberLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) HuberLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) 
SoftMarginLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CTCLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CTCLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CTCLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CTCLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) NLLLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) NLLLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) NLLLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) NLLLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", 
"std::shared_ptr"}) CrossEntropyLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl register_module( - @StdString String 
name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LPPool1dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LPPool1dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LPPool1dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LPPool1dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool2dImpl module); - public native 
@SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LPPool2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LPPool2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LPPool2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LPPool2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) 
ReplicationPad3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AvgPool3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxPool3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", 
"std::shared_ptr"}) FractionalMaxPool3dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RNNImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) RNNImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RNNImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) RNNImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LSTMImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LSTMImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LSTMImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LSTMImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GRUImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) GRUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GRUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) GRUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RNNCellImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) RNNCellImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RNNCellImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) RNNCellImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LSTMCellImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LSTMCellImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LSTMCellImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LSTMCellImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GRUCellImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) GRUCellImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GRUCellImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) GRUCellImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PixelShuffleImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) PixelShuffleImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PixelShuffleImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) PixelShuffleImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl 
module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UpsampleImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) UpsampleImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UpsampleImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) UpsampleImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ELUImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ELUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ELUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ELUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SELUImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SELUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SELUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SELUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HardshrinkImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) HardshrinkImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HardshrinkImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) HardshrinkImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HardtanhImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) HardtanhImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HardtanhImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) HardtanhImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LeakyReLUImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LeakyReLUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LeakyReLUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LeakyReLUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LogSigmoidImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LogSigmoidImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LogSigmoidImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LogSigmoidImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftmaxImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftmaxImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftmaxImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftmaxImpl module); - - /// - /// - /// - /// - public 
native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftminImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftminImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftminImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftminImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Softmax2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Softmax2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Softmax2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) Softmax2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PReLUImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) PReLUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PReLUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) PReLUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReLUImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReLUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReLUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReLUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReLU6Impl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReLU6Impl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReLU6Impl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ReLU6Impl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RReLUImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) RReLUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RReLUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) RReLUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CELUImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CELUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CELUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CELUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GLUImpl 
register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) GLUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GLUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) GLUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GELUImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) GELUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GELUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) GELUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SiLUImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SiLUImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SiLUImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SiLUImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MishImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MishImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MishImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MishImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SigmoidImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SigmoidImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SigmoidImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SigmoidImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftplusImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftplusImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftplusImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftplusImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftshrinkImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftshrinkImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftshrinkImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftshrinkImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftsignImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftsignImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftsignImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) SoftsignImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TanhImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", 
"std::shared_ptr"}) TanhImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TanhImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TanhImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TanhshrinkImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TanhshrinkImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TanhshrinkImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TanhshrinkImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ThresholdImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ThresholdImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ThresholdImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) ThresholdImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LayerNormImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LayerNormImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LayerNormImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LayerNormImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GroupNormImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) GroupNormImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GroupNormImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) GroupNormImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) 
TransformerEncoderLayerImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl module); - - /// - /// - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerImpl register_module( - @StdString BytePointer name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerImpl module); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerImpl register_module( - @StdString String name, - @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerImpl module); - - /** Registers a submodule with this {@code Module}. - * - * This method deals with {@code ModuleHolder}s. - * - * Registering a module makes it available to methods such as {@code modules()}, - * {@code clone()} or {@code to()}. - * - * \rst - * .. 
code-block:: cpp - * - * MyModule::MyModule() { - * submodule_ = register_module("linear", torch::nn::Linear(3, 4)); - * } - * \endrst */ - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Module register_module( - @StdString BytePointer name, - @ByVal ModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Module register_module( - @StdString String name, - @ByVal ModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ModuleDictImpl register_module( - @StdString BytePointer name, - @ByVal ModuleDictImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ModuleDictImpl register_module( - @StdString String name, - @ByVal ModuleDictImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ModuleListImpl register_module( - @StdString BytePointer name, - @ByVal ModuleListImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ModuleListImpl register_module( - @StdString String name, - @ByVal ModuleListImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SequentialImpl register_module( - @StdString BytePointer name, - @ByVal SequentialImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SequentialImpl register_module( - @StdString String name, - @ByVal SequentialImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ParameterDictImpl register_module( - @StdString BytePointer name, - @ByVal ParameterDictImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ParameterDictImpl register_module( - @StdString String name, - @ByVal ParameterDictImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ParameterListImpl register_module( - @StdString BytePointer name, - @ByVal ParameterListImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ParameterListImpl register_module( - @StdString String name, - @ByVal ParameterListImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl register_module( - @StdString BytePointer name, - @ByVal AdaptiveLogSoftmaxWithLossImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveLogSoftmaxWithLossImpl register_module( - @StdString String name, - @ByVal AdaptiveLogSoftmaxWithLossImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl register_module( - @StdString BytePointer name, - @ByVal BatchNorm1dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm1dImpl register_module( - @StdString String name, - @ByVal BatchNorm1dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl 
register_module( - @StdString BytePointer name, - @ByVal InstanceNorm1dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm1dImpl register_module( - @StdString String name, - @ByVal InstanceNorm1dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv1dImpl register_module( - @StdString BytePointer name, - @ByVal Conv1dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv1dImpl register_module( - @StdString String name, - @ByVal Conv1dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl register_module( - @StdString BytePointer name, - @ByVal ConvTranspose1dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose1dImpl register_module( - @StdString String name, - @ByVal ConvTranspose1dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) DropoutImpl register_module( - @StdString BytePointer name, - @ByVal DropoutImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) DropoutImpl register_module( - @StdString String name, - @ByVal DropoutImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl register_module( - @StdString BytePointer name, - @ByVal BatchNorm2dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm2dImpl register_module( - @StdString String name, - @ByVal BatchNorm2dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl register_module( - @StdString BytePointer name, - @ByVal InstanceNorm2dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm2dImpl register_module( - @StdString String name, - @ByVal InstanceNorm2dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv2dImpl register_module( - @StdString BytePointer name, - @ByVal Conv2dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv2dImpl register_module( - @StdString String name, - @ByVal Conv2dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl register_module( - @StdString BytePointer name, - @ByVal ConvTranspose2dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose2dImpl register_module( - @StdString String name, - @ByVal ConvTranspose2dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Dropout2dImpl register_module( - @StdString BytePointer name, - @ByVal Dropout2dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Dropout2dImpl register_module( - @StdString String name, - @ByVal Dropout2dImplModuleHolder 
module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl register_module( - @StdString BytePointer name, - @ByVal BatchNorm3dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BatchNorm3dImpl register_module( - @StdString String name, - @ByVal BatchNorm3dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl register_module( - @StdString BytePointer name, - @ByVal InstanceNorm3dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) InstanceNorm3dImpl register_module( - @StdString String name, - @ByVal InstanceNorm3dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv3dImpl register_module( - @StdString BytePointer name, - @ByVal Conv3dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Conv3dImpl register_module( - @StdString String name, - @ByVal Conv3dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl register_module( - @StdString BytePointer name, - @ByVal ConvTranspose3dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConvTranspose3dImpl register_module( - @StdString String name, - @ByVal ConvTranspose3dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Dropout3dImpl register_module( - @StdString BytePointer name, - @ByVal Dropout3dImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Dropout3dImpl register_module( - @StdString String name, - @ByVal Dropout3dImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl register_module( - @StdString BytePointer name, - @ByVal AlphaDropoutImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AlphaDropoutImpl register_module( - @StdString String name, - @ByVal AlphaDropoutImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl register_module( - @StdString BytePointer name, - @ByVal FeatureAlphaDropoutImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FeatureAlphaDropoutImpl register_module( - @StdString String name, - @ByVal FeatureAlphaDropoutImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl register_module( - @StdString BytePointer name, - @ByVal CosineSimilarityImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CosineSimilarityImpl register_module( - @StdString String name, - @ByVal CosineSimilarityImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl register_module( - @StdString BytePointer name, - @ByVal PairwiseDistanceImplModuleHolder 
module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl register_module( - @StdString String name, - @ByVal PairwiseDistanceImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) EmbeddingImpl register_module( - @StdString BytePointer name, - @ByVal EmbeddingImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) EmbeddingImpl register_module( - @StdString String name, - @ByVal EmbeddingImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl register_module( - @StdString BytePointer name, - @ByVal EmbeddingBagImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) EmbeddingBagImpl register_module( - @StdString String name, - @ByVal EmbeddingBagImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FoldImpl register_module( - @StdString BytePointer name, - @ByVal FoldImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FoldImpl register_module( - @StdString String name, - @ByVal FoldImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UnfoldImpl register_module( - @StdString BytePointer name, - @ByVal UnfoldImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UnfoldImpl register_module( - @StdString String name, - @ByVal UnfoldImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) IdentityImpl register_module( - @StdString BytePointer name, - @ByVal IdentityImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) IdentityImpl register_module( - @StdString String name, - @ByVal IdentityImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LinearImpl register_module( - @StdString BytePointer name, - @ByVal LinearImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LinearImpl register_module( - @StdString String name, - @ByVal LinearImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BilinearImpl register_module( - @StdString BytePointer name, - @ByVal BilinearImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BilinearImpl register_module( - @StdString String name, - @ByVal BilinearImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FlattenImpl register_module( - @StdString BytePointer name, - @ByVal FlattenImplModuleHolder module_holder); - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FlattenImpl register_module( - @StdString String name, - @ByVal FlattenImplModuleHolder module_holder); - - /// - /// - public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UnflattenImpl register_module( - @StdString BytePointer name, - @ByVal 
UnflattenImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UnflattenImpl register_module(
-      @StdString String name,
-      @ByVal UnflattenImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) L1LossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal L1LossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) L1LossImpl register_module(
-      @StdString String name,
-      @ByVal L1LossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) KLDivLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal KLDivLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) KLDivLossImpl register_module(
-      @StdString String name,
-      @ByVal KLDivLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MSELossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MSELossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MSELossImpl register_module(
-      @StdString String name,
-      @ByVal MSELossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BCELossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal BCELossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BCELossImpl register_module(
-      @StdString String name,
-      @ByVal BCELossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal HingeEmbeddingLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HingeEmbeddingLossImpl register_module(
-      @StdString String name,
-      @ByVal HingeEmbeddingLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MultiMarginLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl register_module(
-      @StdString String name,
-      @ByVal MultiMarginLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal CosineEmbeddingLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CosineEmbeddingLossImpl register_module(
-      @StdString String name,
-      @ByVal CosineEmbeddingLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal SmoothL1LossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl register_module(
-      @StdString String name,
-      @ByVal SmoothL1LossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HuberLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal HuberLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HuberLossImpl register_module(
-      @StdString String name,
-      @ByVal HuberLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MultiLabelMarginLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl register_module(
-      @StdString String name,
-      @ByVal MultiLabelMarginLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal SoftMarginLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl register_module(
-      @StdString String name,
-      @ByVal SoftMarginLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MultiLabelSoftMarginLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl register_module(
-      @StdString String name,
-      @ByVal MultiLabelSoftMarginLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal TripletMarginLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl register_module(
-      @StdString String name,
-      @ByVal TripletMarginLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal TripletMarginWithDistanceLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl register_module(
-      @StdString String name,
-      @ByVal TripletMarginWithDistanceLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CTCLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal CTCLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CTCLossImpl register_module(
-      @StdString String name,
-      @ByVal CTCLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal PoissonNLLLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl register_module(
-      @StdString String name,
-      @ByVal PoissonNLLLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MarginRankingLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MarginRankingLossImpl register_module(
-      @StdString String name,
-      @ByVal MarginRankingLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) NLLLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal NLLLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) NLLLossImpl register_module(
-      @StdString String name,
-      @ByVal NLLLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal CrossEntropyLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CrossEntropyLossImpl register_module(
-      @StdString String name,
-      @ByVal CrossEntropyLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl register_module(
-      @StdString BytePointer name,
-      @ByVal BCEWithLogitsLossImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) BCEWithLogitsLossImpl register_module(
-      @StdString String name,
-      @ByVal BCEWithLogitsLossImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ReflectionPad1dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl register_module(
-      @StdString String name,
-      @ByVal ReflectionPad1dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ReplicationPad1dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl register_module(
-      @StdString String name,
-      @ByVal ReplicationPad1dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ConstantPad1dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad1dImpl register_module(
-      @StdString String name,
-      @ByVal ConstantPad1dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool1dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal AvgPool1dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool1dImpl register_module(
-      @StdString String name,
-      @ByVal AvgPool1dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool1dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MaxPool1dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool1dImpl register_module(
-      @StdString String name,
-      @ByVal MaxPool1dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal AdaptiveAvgPool1dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool1dImpl register_module(
-      @StdString String name,
-      @ByVal AdaptiveAvgPool1dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal AdaptiveMaxPool1dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool1dImpl register_module(
-      @StdString String name,
-      @ByVal AdaptiveMaxPool1dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MaxUnpool1dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool1dImpl register_module(
-      @StdString String name,
-      @ByVal MaxUnpool1dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LPPool1dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal LPPool1dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LPPool1dImpl register_module(
-      @StdString String name,
-      @ByVal LPPool1dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ReflectionPad2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl register_module(
-      @StdString String name,
-      @ByVal ReflectionPad2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ReplicationPad2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl register_module(
-      @StdString String name,
-      @ByVal ReplicationPad2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ConstantPad2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad2dImpl register_module(
-      @StdString String name,
-      @ByVal ConstantPad2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ZeroPad2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl register_module(
-      @StdString String name,
-      @ByVal ZeroPad2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal AvgPool2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool2dImpl register_module(
-      @StdString String name,
-      @ByVal AvgPool2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MaxPool2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool2dImpl register_module(
-      @StdString String name,
-      @ByVal MaxPool2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal AdaptiveAvgPool2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool2dImpl register_module(
-      @StdString String name,
-      @ByVal AdaptiveAvgPool2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal AdaptiveMaxPool2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool2dImpl register_module(
-      @StdString String name,
-      @ByVal AdaptiveMaxPool2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MaxUnpool2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool2dImpl register_module(
-      @StdString String name,
-      @ByVal MaxUnpool2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal FractionalMaxPool2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FractionalMaxPool2dImpl register_module(
-      @StdString String name,
-      @ByVal FractionalMaxPool2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LPPool2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal LPPool2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LPPool2dImpl register_module(
-      @StdString String name,
-      @ByVal LPPool2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ReflectionPad3dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl register_module(
-      @StdString String name,
-      @ByVal ReflectionPad3dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ReplicationPad3dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl register_module(
-      @StdString String name,
-      @ByVal ReplicationPad3dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ConstantPad3dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ConstantPad3dImpl register_module(
-      @StdString String name,
-      @ByVal ConstantPad3dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool3dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal AvgPool3dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AvgPool3dImpl register_module(
-      @StdString String name,
-      @ByVal AvgPool3dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool3dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MaxPool3dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxPool3dImpl register_module(
-      @StdString String name,
-      @ByVal MaxPool3dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal AdaptiveAvgPool3dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveAvgPool3dImpl register_module(
-      @StdString String name,
-      @ByVal AdaptiveAvgPool3dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal AdaptiveMaxPool3dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) AdaptiveMaxPool3dImpl register_module(
-      @StdString String name,
-      @ByVal AdaptiveMaxPool3dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MaxUnpool3dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MaxUnpool3dImpl register_module(
-      @StdString String name,
-      @ByVal MaxUnpool3dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal FractionalMaxPool3dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) FractionalMaxPool3dImpl register_module(
-      @StdString String name,
-      @ByVal FractionalMaxPool3dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RNNImpl register_module(
-      @StdString BytePointer name,
-      @ByVal RNNImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RNNImpl register_module(
-      @StdString String name,
-      @ByVal RNNImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LSTMImpl register_module(
-      @StdString BytePointer name,
-      @ByVal LSTMImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LSTMImpl register_module(
-      @StdString String name,
-      @ByVal LSTMImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GRUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal GRUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GRUImpl register_module(
-      @StdString String name,
-      @ByVal GRUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RNNCellImpl register_module(
-      @StdString BytePointer name,
-      @ByVal RNNCellImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RNNCellImpl register_module(
-      @StdString String name,
-      @ByVal RNNCellImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LSTMCellImpl register_module(
-      @StdString BytePointer name,
-      @ByVal LSTMCellImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LSTMCellImpl register_module(
-      @StdString String name,
-      @ByVal LSTMCellImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GRUCellImpl register_module(
-      @StdString BytePointer name,
-      @ByVal GRUCellImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GRUCellImpl register_module(
-      @StdString String name,
-      @ByVal GRUCellImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PixelShuffleImpl register_module(
-      @StdString BytePointer name,
-      @ByVal PixelShuffleImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PixelShuffleImpl register_module(
-      @StdString String name,
-      @ByVal PixelShuffleImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl register_module(
-      @StdString BytePointer name,
-      @ByVal PixelUnshuffleImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl register_module(
-      @StdString String name,
-      @ByVal PixelUnshuffleImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UpsampleImpl register_module(
-      @StdString BytePointer name,
-      @ByVal UpsampleImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) UpsampleImpl register_module(
-      @StdString String name,
-      @ByVal UpsampleImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ELUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ELUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ELUImpl register_module(
-      @StdString String name,
-      @ByVal ELUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SELUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal SELUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SELUImpl register_module(
-      @StdString String name,
-      @ByVal SELUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HardshrinkImpl register_module(
-      @StdString BytePointer name,
-      @ByVal HardshrinkImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HardshrinkImpl register_module(
-      @StdString String name,
-      @ByVal HardshrinkImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HardtanhImpl register_module(
-      @StdString BytePointer name,
-      @ByVal HardtanhImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) HardtanhImpl register_module(
-      @StdString String name,
-      @ByVal HardtanhImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LeakyReLUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal LeakyReLUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LeakyReLUImpl register_module(
-      @StdString String name,
-      @ByVal LeakyReLUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LogSigmoidImpl register_module(
-      @StdString BytePointer name,
-      @ByVal LogSigmoidImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LogSigmoidImpl register_module(
-      @StdString String name,
-      @ByVal LogSigmoidImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftmaxImpl register_module(
-      @StdString BytePointer name,
-      @ByVal SoftmaxImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftmaxImpl register_module(
-      @StdString String name,
-      @ByVal SoftmaxImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftminImpl register_module(
-      @StdString BytePointer name,
-      @ByVal SoftminImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftminImpl register_module(
-      @StdString String name,
-      @ByVal SoftminImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl register_module(
-      @StdString BytePointer name,
-      @ByVal LogSoftmaxImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LogSoftmaxImpl register_module(
-      @StdString String name,
-      @ByVal LogSoftmaxImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Softmax2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal Softmax2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) Softmax2dImpl register_module(
-      @StdString String name,
-      @ByVal Softmax2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PReLUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal PReLUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) PReLUImpl register_module(
-      @StdString String name,
-      @ByVal PReLUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReLUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ReLUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReLUImpl register_module(
-      @StdString String name,
-      @ByVal ReLUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReLU6Impl register_module(
-      @StdString BytePointer name,
-      @ByVal ReLU6ImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ReLU6Impl register_module(
-      @StdString String name,
-      @ByVal ReLU6ImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RReLUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal RReLUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) RReLUImpl register_module(
-      @StdString String name,
-      @ByVal RReLUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CELUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal CELUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CELUImpl register_module(
-      @StdString String name,
-      @ByVal CELUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GLUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal GLUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GLUImpl register_module(
-      @StdString String name,
-      @ByVal GLUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GELUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal GELUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GELUImpl register_module(
-      @StdString String name,
-      @ByVal GELUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SiLUImpl register_module(
-      @StdString BytePointer name,
-      @ByVal SiLUImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SiLUImpl register_module(
-      @StdString String name,
-      @ByVal SiLUImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MishImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MishImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MishImpl register_module(
-      @StdString String name,
-      @ByVal MishImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SigmoidImpl register_module(
-      @StdString BytePointer name,
-      @ByVal SigmoidImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SigmoidImpl register_module(
-      @StdString String name,
-      @ByVal SigmoidImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftplusImpl register_module(
-      @StdString BytePointer name,
-      @ByVal SoftplusImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftplusImpl register_module(
-      @StdString String name,
-      @ByVal SoftplusImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftshrinkImpl register_module(
-      @StdString BytePointer name,
-      @ByVal SoftshrinkImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftshrinkImpl register_module(
-      @StdString String name,
-      @ByVal SoftshrinkImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftsignImpl register_module(
-      @StdString BytePointer name,
-      @ByVal SoftsignImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) SoftsignImpl register_module(
-      @StdString String name,
-      @ByVal SoftsignImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TanhImpl register_module(
-      @StdString BytePointer name,
-      @ByVal TanhImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TanhImpl register_module(
-      @StdString String name,
-      @ByVal TanhImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TanhshrinkImpl register_module(
-      @StdString BytePointer name,
-      @ByVal TanhshrinkImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TanhshrinkImpl register_module(
-      @StdString String name,
-      @ByVal TanhshrinkImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ThresholdImpl register_module(
-      @StdString BytePointer name,
-      @ByVal ThresholdImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) ThresholdImpl register_module(
-      @StdString String name,
-      @ByVal ThresholdImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl register_module(
-      @StdString BytePointer name,
-      @ByVal MultiheadAttentionImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl register_module(
-      @StdString String name,
-      @ByVal MultiheadAttentionImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LayerNormImpl register_module(
-      @StdString BytePointer name,
-      @ByVal LayerNormImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LayerNormImpl register_module(
-      @StdString String name,
-      @ByVal LayerNormImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl register_module(
-      @StdString BytePointer name,
-      @ByVal LocalResponseNormImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) LocalResponseNormImpl register_module(
-      @StdString String name,
-      @ByVal LocalResponseNormImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl register_module(
-      @StdString BytePointer name,
-      @ByVal CrossMapLRN2dImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) CrossMapLRN2dImpl register_module(
-      @StdString String name,
-      @ByVal CrossMapLRN2dImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GroupNormImpl register_module(
-      @StdString BytePointer name,
-      @ByVal GroupNormImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) GroupNormImpl register_module(
-      @StdString String name,
-      @ByVal GroupNormImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl register_module(
-      @StdString BytePointer name,
-      @ByVal TransformerEncoderLayerImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl register_module(
-      @StdString String name,
-      @ByVal TransformerEncoderLayerImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl register_module(
-      @StdString BytePointer name,
-      @ByVal TransformerDecoderLayerImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl register_module(
-      @StdString String name,
-      @ByVal TransformerDecoderLayerImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl register_module(
-      @StdString BytePointer name,
-      @ByVal TransformerEncoderImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl register_module(
-      @StdString String name,
-      @ByVal TransformerEncoderImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl register_module(
-      @StdString BytePointer name,
-      @ByVal TransformerDecoderImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl register_module(
-      @StdString String name,
-      @ByVal TransformerDecoderImplModuleHolder module_holder);
-
-  ///
-  ///
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerImpl register_module(
-      @StdString BytePointer name,
-      @ByVal TransformerImplModuleHolder module_holder);
-  public native @SharedPtr @Name("register_module") @Cast({"", "std::shared_ptr"}) TransformerImpl register_module(
-      @StdString String name,
-      @ByVal TransformerImplModuleHolder module_holder);
+      @SharedPtr("torch::nn::Module") @ByVal Module module);
+
+  /** Registers a submodule with this {@code Module}.
+   *
+   * This method deals with {@code ModuleHolder}s.
+   *
+   * Registering a module makes it available to methods such as {@code modules()},
+   * {@code clone()} or {@code to()}.
+   *
+   * \rst
+   * .. code-block:: cpp
+   *
+   *   MyModule::MyModule() {
+   *     submodule_ = register_module("linear", torch::nn::Linear(3, 4));
+   *   }
+   * \endrst */
 
   /** Replaces a registered submodule with this {@code Module}.
    *
    *
@@ -3140,6 +617,13 @@ public native void to(
 
   /** Unregisters a submodule from this {@code Module}. If there is no such module
    *  with {@code name} an exception is thrown. */
-  public native void unregister_module(@StdString BytePointer name);
-  public native void unregister_module(@StdString String name);
+  public void unregister_module(BytePointer name) { asModule()._unregister_module(name); }
+  private native @Name("unregister_module") void _unregister_module(@StdString BytePointer name);
+  public void unregister_module(String name) { asModule()._unregister_module(name); }
+  private native @Name("unregister_module") void _unregister_module(@StdString String name);
+  private static Pointer shiftLeft(Pointer stream, Module module) { return _shiftLeft(stream, module.asModule()); }
+  private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer _shiftLeft(
+      @Cast("std::ostream*") @ByRef Pointer stream,
+      @Const @ByRef Module module);
+  public Pointer shiftLeft(Pointer stream) { return shiftLeft(stream, this); }
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDict.java
deleted file mode 100644
index 030c888b28f..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDict.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-/** A {@code ModuleHolder} subclass for {@code ModuleDictImpl}.
- *  See the documentation for {@code ModuleDictImpl} class to learn what methods it
- *  provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's
- *  module storage semantics. */
-@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class ModuleDict extends ModuleDictImplModuleHolder {
-    static { Loader.load(); }
-
-
-    public ModuleDict() { super((Pointer)null); allocate(); }
-    private native void allocate(); public ModuleDict(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-    private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ModuleDict(@SharedPtr @Cast({"", "std::shared_ptr"}) ModuleDictImpl module) { super((Pointer)null); allocate(module); }
-    private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ModuleDictImpl module);
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public ModuleDict(Pointer p) { super(p); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public ModuleDict(long size) { super((Pointer)null); allocateArray(size); }
-    private native void allocateArray(long size);
-    @Override public ModuleDict position(long position) {
-        return (ModuleDict)super.position(position);
-    }
-    @Override public ModuleDict getPointer(long i) {
-        return new ModuleDict((Pointer)this).offsetAddress(i);
-    }
-
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java
index f1f7a875ac4..bce177db037 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -89,22 +91,22 @@ public class ModuleDictImpl extends ModuleDictImplCloneable {
 
   public ModuleDictImpl() { super((Pointer)null); allocate(); }
-  @NoDeallocator private native void allocate();
+  @SharedPtr private native void allocate();
 
   /** Constructs the {@code ModuleDict} from a list of string-Module pairs. */
   public ModuleDictImpl(
-      @Const @ByRef StringSharedModulePairVector modules) { super((Pointer)null); allocate(modules); }
-  @NoDeallocator private native void allocate(
-      @Const @ByRef StringSharedModulePairVector modules);
+      @Const @ByRef StringSharedModuleVector modules) { super((Pointer)null); allocate(modules); }
+  @SharedPtr private native void allocate(
+      @Const @ByRef StringSharedModuleVector modules);
 
   /** Constructs the {@code ModuleDict} from an {@code OrderedDict}. */
   public ModuleDictImpl(
       @Const @ByRef StringSharedModuleDict modules) { super((Pointer)null); allocate(modules); }
-  @NoDeallocator private native void allocate(
+  @SharedPtr private native void allocate(
       @Const @ByRef StringSharedModuleDict modules);
 
   /** Return the items in the {@code ModuleDict}. */
-  public native @ByVal StringSharedModulePairVector items();
+  public native @ByVal StringSharedModuleVector items();
 
   /** Return the keys in the {@code ModuleDict}. */
   public native @ByVal StringVector keys();
@@ -113,12 +115,12 @@ public ModuleDictImpl(
   public native @ByVal SharedModuleVector values();
 
   /** Return an iterator to the start of {@code ModuleDict}. */
-  public native @ByVal @Cast("torch::nn::ModuleDictImpl::Iterator*") StringSharedModuleDictItem begin();
+  public native @ByVal @Cast("torch::nn::ModuleDictImpl::Iterator*") StringSharedModuleDictItemVector.Iterator begin();
 
   /** Return a const iterator to the start of {@code ModuleDict}. */
 
   /** Return an iterator to the end of {@code ModuleDict}. */
-  public native @ByVal @Cast("torch::nn::ModuleDictImpl::Iterator*") StringSharedModuleDictItem end();
+  public native @ByVal @Cast("torch::nn::ModuleDictImpl::Iterator*") StringSharedModuleDictItemVector.Iterator end();
 
   /** Return a const iterator to the end of {@code ModuleDict}. */
 
@@ -137,9 +139,9 @@ public ModuleDictImpl(
 
   /** Special cloning function for {@code ModuleDict} because it does not use
    *  {@code reset()}. */
-  public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module clone(
-      @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
-  public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module clone();
+  public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
+      @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+  public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
 
   /** {@code reset()} is empty for {@code ModuleDict}, since it does not have parameters of
    *  its own. */
@@ -151,8 +153,8 @@ public ModuleDictImpl(
   /** Attempts to return the {@code Module} associated with the given {@code key}. Throws
    *  an exception if no such {@code key} is stored in the {@code ModuleDict}. Check
    *  contains(key) beforehand for a non-throwing way of access. */
-  public native @SharedPtr @Name("operator []") @Cast({"", "std::shared_ptr"}) Module get(@StdString BytePointer key);
-  public native @SharedPtr @Name("operator []") @Cast({"", "std::shared_ptr"}) Module get(@StdString String key);
+  public native @SharedPtr("torch::nn::Module") @ByVal @Name("operator []") Module get(@StdString BytePointer key);
+  public native @SharedPtr("torch::nn::Module") @ByVal @Name("operator []") Module get(@StdString String key);
 
   /** Attempts to return the module at the given key as the requested type.
    *  Throws an exception if no such {@code key} is stored in the {@code ModuleDict}.
@@ -165,12 +167,12 @@ public ModuleDictImpl(
   /** Removes and returns the {@code Module} associated with the given {@code key}.
    *  Throws an exception if no such {@code key} is stored in the {@code ModuleDict}.
    *  Check contains(key) beforehand for a non-throwing way of access. */
-  public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module pop(@StdString BytePointer key);
-  public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module pop(@StdString String key);
+  public native @SharedPtr("torch::nn::Module") @ByVal Module pop(@StdString BytePointer key);
+  public native @SharedPtr("torch::nn::Module") @ByVal Module pop(@StdString String key);
 
   /** Updates the {@code ModuleDict} with a vector of key-module pairs. */
   public native void update(
-      @Const @ByRef StringSharedModulePairVector modules);
+      @Const @ByRef StringSharedModuleVector modules);
 
   /** Updates the {@code ModuleDict} with key-value pairs from {@code OrderedDict} or
    *  {@code ModuleDict}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java
index 79906df0376..83949f92495 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -27,18 +29,18 @@ public class ModuleDictImplCloneable extends Module {
   static { Loader.load(); }
 
   /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
   public ModuleDictImplCloneable(Pointer p) { super(p); }
+  @Override public Module asModule() { return asModule(this); }
+  @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ModuleDictImplCloneable pointer);
 
   /** {@code reset()} must perform initialization of all members with reference
    *  semantics, most importantly parameters, buffers and submodules. */
   public native void reset();
 
-  @Override public Module asModule() { return asModule(this); }
-  @Namespace public static native @Name("static_cast") Module asModule(ModuleDictImplCloneable module);
 
   /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters
    *  and submodules in the cloned module are different from those in the
    *  original module. */
-  public native @SharedPtr Module clone(
-      @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
-  public native @SharedPtr Module clone();
+  public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
+      @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+  public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplModuleHolder.java
deleted file mode 100644
index 1e84bd992c1..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplModuleHolder.java
+++ /dev/null
@@ -1,89 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class ModuleDictImplModuleHolder extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public ModuleDictImplModuleHolder(Pointer p) { super(p); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public ModuleDictImplModuleHolder(long size) { super((Pointer)null); allocateArray(size); }
-    private native void allocateArray(long size);
-    @Override public ModuleDictImplModuleHolder position(long position) {
-        return (ModuleDictImplModuleHolder)super.position(position);
-    }
-    @Override public ModuleDictImplModuleHolder getPointer(long i) {
-        return new ModuleDictImplModuleHolder((Pointer)this).offsetAddress(i);
-    }
-
-
-    ///
-
-    /** Default constructs the contained module if it has a default constructor,
-     *  else produces a static error.
-     *
-     *  NOTE: This uses the behavior of template
-     *  classes in C++ that constructors (or any methods) are only compiled when
-     *  actually used. */
-    public ModuleDictImplModuleHolder() { super((Pointer)null); allocate(); }
-    private native void allocate();
-
-    /** Constructs the {@code ModuleHolder} with an empty contained value. Access to
-     *  the underlying module is not permitted and will throw an exception, until
-     *  a value is assigned. */
-    /* implicit */ public ModuleDictImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0);
-
-    /** Constructs the {@code ModuleHolder} with a contained module, forwarding all
-     *  arguments to its constructor. */
-
-    /** Constructs the {@code ModuleHolder} from a pointer to the contained type.
-     *  Example: {@code Linear(std::make_shared(...))}. */
-    /* implicit */ public ModuleDictImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ModuleDictImpl module) { super((Pointer)null); allocate(module); }
-private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ModuleDictImpl module);
-
-    /** Returns true if the {@code ModuleHolder} contains a module, or false if it is
-     *  {@code nullptr}. */
-    public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean();
-
-    /** Forwards to the contained module. */
-    public native @Name("operator ->") ModuleDictImpl access();
-
-    /** Forwards to the contained module. */
-
-    /** Returns a reference to the contained module. */
-    public native @ByRef @Name("operator *") ModuleDictImpl multiply();
-
-    /** Returns a const reference to the contained module. */
-
-    /** Returns a shared pointer to the underlying module. */
-    public native @SharedPtr @Cast({"", "std::shared_ptr"}) ModuleDictImpl ptr();
-
-    /** Returns a pointer to the underlying module. */
-    public native ModuleDictImpl get();
-
-    /** Returns a const pointer to the underlying module. */
-
-    /** Calls the {@code forward()} method of the contained module. */
-
-    /** Forwards to the subscript operator of the contained module.
-     *  NOTE: std::forward is qualified to prevent VS2017 emitting
-     *  error C2872: 'std': ambiguous symbol */
-
-    /** Returns true if the {@code ModuleHolder} does not contain a module. */
-    public native @Cast("bool") @NoException(true) boolean is_empty();
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleHolder.java
deleted file mode 100644
index 5a73a96b7ad..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleHolder.java
+++ /dev/null
@@ -1,93 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-/** A {@code ModuleHolder} is essentially a wrapper around {@code std::shared_ptr} where
- *  {@code M} is an {@code nn::Module} subclass, with convenient constructors defined for
- *  the kind of constructions we want to allow for our modules. */
-@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class ModuleHolder extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public ModuleHolder(Pointer p) { super(p); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public ModuleHolder(long size) { super((Pointer)null); allocateArray(size); }
-    private native void allocateArray(long size);
-    @Override public ModuleHolder position(long position) {
-        return (ModuleHolder)super.position(position);
-    }
-    @Override public ModuleHolder getPointer(long i) {
-        return new ModuleHolder((Pointer)this).offsetAddress(i);
-    }
-
-
-    ///
-
-    /** Default constructs the contained module if it has a default constructor,
-     *  else produces a static error.
-     *
-     *  NOTE: This uses the behavior of template
-     *  classes in C++ that constructors (or any methods) are only compiled when
-     *  actually used. */
-    public ModuleHolder() { super((Pointer)null); allocate(); }
-    private native void allocate();
-
-    /** Constructs the {@code ModuleHolder} with an empty contained value. Access to
-     *  the underlying module is not permitted and will throw an exception, until
-     *  a value is assigned. */
-    /* implicit */ public ModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0);
-
-    /** Constructs the {@code ModuleHolder} with a contained module, forwarding all
-     *  arguments to its constructor. */
-
-    /** Constructs the {@code ModuleHolder} from a pointer to the contained type.
-     *  Example: {@code Linear(std::make_shared(...))}. */
-    /* implicit */ public ModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) Module module) { super((Pointer)null); allocate(module); }
-private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Module module);
-
-    /** Returns true if the {@code ModuleHolder} contains a module, or false if it is
-     *  {@code nullptr}. */
-    public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean();
-
-    /** Forwards to the contained module. */
-    public native @Name("operator ->") Module access();
-
-    /** Forwards to the contained module. */
-
-    /** Returns a reference to the contained module. */
-    public native @ByRef @Name("operator *") Module multiply();
-
-    /** Returns a const reference to the contained module. */
-
-    /** Returns a shared pointer to the underlying module. */
-    public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module ptr();
-
-    /** Returns a pointer to the underlying module. */
-    public native Module get();
-
-    /** Returns a const pointer to the underlying module. */
-
-    /** Calls the {@code forward()} method of the contained module. */
-
-    /** Forwards to the subscript operator of the contained module.
-     *  NOTE: std::forward is qualified to prevent VS2017 emitting
-     *  error C2872: 'std': ambiguous symbol */
-
-    /** Returns true if the {@code ModuleHolder} does not contain a module. */
-    public native @Cast("bool") @NoException(true) boolean is_empty();
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfo.java
index e62164ac129..5275ef08a30 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfo.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfo.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -30,7 +32,7 @@ public class ModuleInstanceInfo extends Pointer {
   /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
   public ModuleInstanceInfo(Pointer p) { super(p); }
 
-  public native @SharedPtr @ByVal ClassType class_type();
+  public native @SharedPtr("c10::ClassType") @ByVal ClassType class_type();
   public native @StdString BytePointer instance_name();
 
   public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ModuleInstanceInfo rhs);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfoOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfoOptional.java
index 4769e0bb84e..8bf4673d778 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfoOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleInstanceInfoOptional.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -26,6 +28,7 @@ public class ModuleInstanceInfoOptional extends Pointer {
     public native @Name("operator =") @ByRef ModuleInstanceInfoOptional put(@ByRef ModuleInstanceInfoOptional x);
 
     public native boolean has_value();
+    public native void reset();
     public native @Name("value") @ByRef ModuleInstanceInfo get();
     @ValueSetter public native ModuleInstanceInfoOptional put(@ByRef ModuleInstanceInfo value);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleList.java
deleted file mode 100644
index dc25e79f641..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleList.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-/** A {@code ModuleHolder} subclass for {@code ModuleListImpl}.
- * See the documentation for {@code ModuleListImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ModuleList extends ModuleListImplModuleHolder { - static { Loader.load(); } - - - public ModuleList() { super((Pointer)null); allocate(); } - private native void allocate(); public ModuleList(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ModuleList(@SharedPtr @Cast({"", "std::shared_ptr"}) ModuleListImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ModuleListImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ModuleList(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ModuleList(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ModuleList position(long position) { - return (ModuleList)super.position(position); - } - @Override public ModuleList getPointer(long i) { - return new ModuleList((Pointer)this).offsetAddress(i); - } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java index fbcfbebdd83..d41dd904624 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -77,15 +79,15 @@ public class ModuleListImpl extends ModuleListImplCloneable { public ModuleListImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); /** Constructs the {@code ModuleList} from a variadic list of modules. */ /** Special cloning function for {@code ModuleList} because it does not use * {@code reset()}. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); /** {@code reset()} is empty for {@code ModuleList}, since it does not have parameters of * its own. */ @@ -94,7 +96,8 @@ public class ModuleListImpl extends ModuleListImplCloneable { /** Pretty prints the {@code ModuleList} module into the given {@code stream}. 
*/ public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); - public native void push_back(@SharedPtr @Cast({"", "std::shared_ptr"}) Module module); + public void push_back(Module module) { _push_back(module.asModule()); } + private native @Name("push_back") void _push_back(@SharedPtr("torch::nn::Module") @ByVal Module module); /** Adds a new {@code Module} to the {@code ModuleList} container, moving or copying * it into a {@code shared_ptr} internally. This method allows passing value types, @@ -126,14 +129,14 @@ public class ModuleListImpl extends ModuleListImplCloneable { /** Attempts to return a {@code std::shared_ptr} whose dynamic type is that of the * underlying module at the given index. Throws an exception if the index is * out of bounds. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module ptr(@Cast("size_t") long index); + public native @SharedPtr("torch::nn::Module") @ByVal Module ptr(@Cast("size_t") long index); /** Attempts to return a {@code std::shared_ptr} whose type is the one provided. * Throws an exception if the index is out of bounds or the types do not * match. */ /** Like {@code ptr(index)}. */ - public native @SharedPtr @Name("operator []") @Cast({"", "std::shared_ptr"}) Module get(@Cast("size_t") long index); + public native @SharedPtr("torch::nn::Module") @ByVal @Name("operator []") Module get(@Cast("size_t") long index); /** The current size of the {@code ModuleList} container. */ public native @Cast("size_t") @NoException(true) long size(); @@ -141,7 +144,8 @@ public class ModuleListImpl extends ModuleListImplCloneable { /** True if there are no modules in the {@code ModuleList}. */ public native @Cast("bool") @NoException(true) boolean is_empty(); - public native void insert(@Cast("size_t") long index, @SharedPtr @Cast({"", "std::shared_ptr"}) Module module); + public void insert(long index, Module module) { _insert(index, module.asModule()); } + private native @Name("insert") void _insert(@Cast("size_t") long index, @SharedPtr("torch::nn::Module") @ByVal Module module); /** Unwraps the contained module of a {@code ModuleHolder} and inserts it in the * {@code ModuleList}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java index 31c2865c987..8580276f85b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ModuleListImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
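With the ModuleList holder class (and ModuleListImplModuleHolder) deleted above, user code now instantiates ModuleListImpl directly; push_back and insert accept any Module through the new asModule() bridge that the Java-side wrappers call internally. A sketch using the Linear and ReLU Impl constructors from the same presets:

    ModuleListImpl layers = new ModuleListImpl();   // allocate() is @SharedPtr since 1.5.10
    layers.push_back(new LinearImpl(4, 8));         // wrapper forwards module.asModule()
    layers.push_back(new ReLUImpl());
    layers.insert(1, new LinearImpl(8, 8));
    Module first = layers.get(0);                   // operator[], shared_ptr-backed Module
    System.out.println(layers.size() + " modules, empty=" + layers.is_empty());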
*/ public ModuleListImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ModuleListImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ModuleListImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplModuleHolder.java deleted file mode 100644 index b4ca4f113f6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplModuleHolder.java +++ /dev/null @@ -1,89 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ModuleListImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ModuleListImplModuleHolder(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ModuleListImplModuleHolder(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ModuleListImplModuleHolder position(long position) { - return (ModuleListImplModuleHolder)super.position(position); - } - @Override public ModuleListImplModuleHolder getPointer(long i) { - return new ModuleListImplModuleHolder((Pointer)this).offsetAddress(i); - } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - public ModuleListImplModuleHolder() { super((Pointer)null); allocate(); } - private native void allocate(); - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. 
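As the Cloneable hunk above shows, asModule() is now generated from SHARED_PTR_NAMESPACE::static_pointer_cast rather than a raw static_cast, so the returned Module shares ownership with the Impl instead of aliasing a raw pointer, and clone() returns a @SharedPtr Module by value. A sketch:

    ModuleListImpl layers = new ModuleListImpl();
    Module asBase = layers.asModule(); // shared_ptr-based upcast (was a raw static_cast in 1.5.9)
    Module copy   = layers.clone();    // recursive deep copy, returned as @SharedPtr @ByVal Module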
*/ - /* implicit */ public ModuleListImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ModuleListImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ModuleListImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ModuleListImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ModuleListImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ModuleListImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ModuleListImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ModuleListImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModulePolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModulePolicy.java index 0d017bc74a4..2ab99c73569 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModulePolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModulePolicy.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,7 +48,7 @@ public class ModulePolicy extends Pointer { @ByVal IValue v); // is slot i in typ something that this iterator should return, otherwise, // we skip it. - public static native @Cast("bool") boolean valid(@Const @SharedPtr @ByRef ClassType typ, @Cast("size_t") long i, @Const @ByRef IValue v); + public static native @Cast("bool") boolean valid(@Const @SharedPtr("c10::ClassType") @ByRef ClassType typ, @Cast("size_t") long i, @Const @ByRef IValue v); // are we going to return everything? If so, we can optimize the calculate // of the size of the list. 
@MemberGetter public static native @Cast("const bool") boolean all_slots(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleVector.java index 91d53de3ea4..610b36f5e9f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class ModuleVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public Module front() { return get(0); } + public Module back() { return get(size() - 1); } @Index(function = "at") public native @ByRef Module get(@Cast("size_t") long i); public native ModuleVector put(@Cast("size_t") long i, Module value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLoss.java deleted file mode 100644 index 3d9e9e1191e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MultiLabelMarginLossImpl}. - * See the documentation for {@code MultiLabelMarginLossImpl} class to learn what - * methods it provides, and examples of how to use {@code MultiLabelMarginLoss} with - * {@code torch::nn::MultiLabelMarginLossOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MultiLabelMarginLoss extends MultiLabelMarginLossImplModuleHolder { - static { Loader.load(); } - - public MultiLabelMarginLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MultiLabelMarginLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
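ModuleVector picks up front() and back() convenience accessors, implemented on the Java side on top of the indexed get(), as the hunk above shows. A sketch, assuming the adapter's default constructor and a vector populated elsewhere:

    ModuleVector mods = new ModuleVector(); // adapter over the native module vector
    // ... populated by native code; front()/back() assume size() > 0
    Module head = mods.front();  // shorthand for get(0)
    Module tail = mods.back();   // shorthand for get(size() - 1)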
*/ - public MultiLabelMarginLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java index 6651935a41d..9790a642bff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -49,9 +51,9 @@ public class MultiLabelMarginLossImpl extends MultiLabelMarginLossImplCloneable } public MultiLabelMarginLossImpl(@ByVal(nullValue = "torch::nn::MultiLabelMarginLossOptions{}") MultiLabelMarginLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::MultiLabelMarginLossOptions{}") MultiLabelMarginLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::MultiLabelMarginLossOptions{}") MultiLabelMarginLossOptions options_); public MultiLabelMarginLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java index 255ff670480..33ce8340d6a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MultiLabelMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiLabelMarginLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiLabelMarginLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MultiLabelMarginLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplModuleHolder.java deleted file mode 100644 index 77b2e632313..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MultiLabelMarginLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MultiLabelMarginLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MultiLabelMarginLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MultiLabelMarginLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MultiLabelMarginLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MultiLabelMarginLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelMarginLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native MultiLabelMarginLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossOptions.java index d1e67aabda3..0098079b03b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -51,5 +53,5 @@ public class MultiLabelMarginLossOptions extends Pointer { public MultiLabelMarginLossOptions(@ByVal kSum reduction) { super((Pointer)null); allocate(reduction); } private native void allocate(@ByVal kSum reduction); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLoss.java deleted file mode 100644 index 2940bf9e771..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MultiLabelSoftMarginLossImpl}. - * See the documentation for {@code MultiLabelSoftMarginLossImpl} class to learn what - * methods it provides, and examples of how to use {@code MultiLabelSoftMarginLoss} - * with {@code torch::nn::MultiLabelSoftMarginLossOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. 
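Across the loss options classes, the reduction accessor's return type is renamed from loss_reduction_t to LossReduction (the variant of kNone/kMean/kSum). A sketch using the kSum constructor overload visible in the MultiLabelMarginLossOptions hunk above, assuming the enumtype tag classes are default-constructible as elsewhere in these presets:

    MultiLabelMarginLossOptions opts = new MultiLabelMarginLossOptions(new kSum());
    LossReduction red = opts.reduction(); // was loss_reduction_t in 1.5.9; same variant, new name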
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MultiLabelSoftMarginLoss extends MultiLabelSoftMarginLossImplModuleHolder { - static { Loader.load(); } - - public MultiLabelSoftMarginLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MultiLabelSoftMarginLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MultiLabelSoftMarginLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java index e8530c7fe6c..261ec52c51f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -50,10 +52,10 @@ public class MultiLabelSoftMarginLossImpl extends MultiLabelSoftMarginLossImplCl public MultiLabelSoftMarginLossImpl( @ByVal(nullValue = "torch::nn::MultiLabelSoftMarginLossOptions{}") MultiLabelSoftMarginLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @ByVal(nullValue = "torch::nn::MultiLabelSoftMarginLossOptions{}") MultiLabelSoftMarginLossOptions options_); public MultiLabelSoftMarginLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); /** Pretty prints the {@code MultiLabelSoftMarginLoss} module into the given * {@code stream}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java index ef64aaf078c..ef7de6a6efe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MultiLabelSoftMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public MultiLabelSoftMarginLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiLabelSoftMarginLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MultiLabelSoftMarginLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplModuleHolder.java deleted file mode 100644 index 5b601590c45..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MultiLabelSoftMarginLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MultiLabelSoftMarginLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MultiLabelSoftMarginLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public MultiLabelSoftMarginLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MultiLabelSoftMarginLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MultiLabelSoftMarginLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MultiLabelSoftMarginLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native MultiLabelSoftMarginLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossOptions.java index 2b06c6e22fd..3f49444613e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,5 +46,5 @@ public class MultiLabelSoftMarginLossOptions extends Pointer { } public native @ByRef @NoException(true) Tensor weight(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLoss.java deleted file mode 100644 index 5ca5d52e894..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static 
org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MultiMarginLossImpl}. - * See the documentation for {@code MultiMarginLossImpl} class to learn what methods - * it provides, and examples of how to use {@code MultiMarginLoss} with - * {@code torch::nn::MultiMarginLossOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MultiMarginLoss extends MultiMarginLossImplModuleHolder { - static { Loader.load(); } - - public MultiMarginLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MultiMarginLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MultiMarginLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java index 9b02ee7b447..9fbec19e09d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -49,9 +51,9 @@ public class MultiMarginLossImpl extends MultiMarginLossImplCloneable { } public MultiMarginLossImpl(@ByVal(nullValue = "torch::nn::MultiMarginLossOptions{}") MultiMarginLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::MultiMarginLossOptions{}") MultiMarginLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::MultiMarginLossOptions{}") MultiMarginLossOptions options_); public MultiMarginLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java index b08a73a0d8e..922936b9cbb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; 
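The allocator hunks replace @NoDeallocator with @SharedPtr on the native allocate() methods, so a module constructed from Java is now owned by a std::shared_ptr and can release its native reference normally; previously the deallocator was suppressed outright. A sketch relying only on JavaCPP's Pointer implementing AutoCloseable:

    MultiMarginLossImpl loss = new MultiMarginLossImpl();   // backed by std::shared_ptr in 1.5.10
    try (MultiMarginLossImpl scoped = new MultiMarginLossImpl()) {
        // the Java object holds one shared_ptr reference; close() (or GC) releases it
    }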
@@ -20,18 +22,18 @@ public class MultiMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiMarginLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiMarginLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MultiMarginLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplModuleHolder.java deleted file mode 100644 index a03462b34b2..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MultiMarginLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MultiMarginLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MultiMarginLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. 
- * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MultiMarginLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") MultiMarginLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MultiMarginLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MultiMarginLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native MultiMarginLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossOptions.java index 4a627f753d7..bd5c7196fa5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,5 +47,5 @@ public class MultiMarginLossOptions extends Pointer { public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer p(); public native @ByRef @NoException(true) DoublePointer margin(); public native @ByRef @NoException(true) Tensor weight(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttention.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttention.java deleted file mode 100644 index a44bfd16af0..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttention.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import 
static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code MultiheadAttentionImpl}. - * See the documentation for {@code MultiheadAttentionImpl} class to learn what - * methods it provides, and examples of how to use {@code MultiheadAttention} with - * {@code torch::nn::MultiheadAttentionOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MultiheadAttention extends MultiheadAttentionImplModuleHolder { - static { Loader.load(); } - - public MultiheadAttention(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public MultiheadAttention(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MultiheadAttention(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionForwardFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionForwardFuncOptions.java index 1ceb447de8a..f9efb6bf18a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionForwardFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionForwardFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java index 4b21207ad00..42d5e88e385 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,19 +38,19 @@ public class MultiheadAttentionImpl extends MultiheadAttentionImplCloneable { public MultiheadAttentionImpl(Pointer p) { super(p); } public MultiheadAttentionImpl(@Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads) { super((Pointer)null); allocate(embed_dim, num_heads); } - @NoDeallocator private native void allocate(@Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads); + @SharedPtr private native void allocate(@Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads); public MultiheadAttentionImpl(@Const @ByRef 
MultiheadAttentionOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef MultiheadAttentionOptions options_); + @SharedPtr private native void allocate(@Const @ByRef MultiheadAttentionOptions options_); - public native @ByVal TensorTensorTuple forward( + public native @ByVal T_TensorTensor_T forward( @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor key_padding_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor key_padding_mask, @Cast("bool") boolean need_weights/*=true*/, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor attn_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor attn_mask, @Cast("bool") boolean average_attn_weights/*=true*/); - public native @ByVal TensorTensorTuple forward( + public native @ByVal T_TensorTensor_T forward( @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value); @@ -64,7 +66,6 @@ public class MultiheadAttentionImpl extends MultiheadAttentionImplCloneable { public native @ByRef Tensor in_proj_bias(); public native MultiheadAttentionImpl in_proj_bias(Tensor setter); public native @ByRef Tensor bias_k(); public native MultiheadAttentionImpl bias_k(Tensor setter); public native @ByRef Tensor bias_v(); public native MultiheadAttentionImpl bias_v(Tensor setter); - public native @ByRef Linear out_proj(); public native MultiheadAttentionImpl out_proj(Linear setter); public native @ByRef Tensor q_proj_weight(); public native MultiheadAttentionImpl q_proj_weight(Tensor setter); public native @ByRef Tensor k_proj_weight(); public native MultiheadAttentionImpl k_proj_weight(Tensor setter); public native @ByRef Tensor v_proj_weight(); public native MultiheadAttentionImpl v_proj_weight(Tensor setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java index b7d3ad17222..6ff01f777e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class MultiheadAttentionImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public MultiheadAttentionImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr MultiheadAttentionImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. 
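MultiheadAttentionImpl.forward now returns the generated tuple wrapper T_TensorTensor_T instead of TensorTensorTuple, and the defaulted mask arguments are spelled torch::Tensor{}. A sketch, assuming get0()/get1() accessors on the tuple wrapper and the varargs randn factory from org.bytedeco.pytorch.global.torch:

    MultiheadAttentionImpl mha = new MultiheadAttentionImpl(64, 8); // embed_dim=64, num_heads=8
    Tensor q = torch.randn(10, 2, 64);   // (seq_len, batch, embed_dim)
    Tensor k = torch.randn(10, 2, 64);
    Tensor v = torch.randn(10, 2, 64);
    T_TensorTensor_T out = mha.forward(q, k, v);
    Tensor attnOutput  = out.get0();     // attention output
    Tensor attnWeights = out.get1();     // averaged attention weights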
*/ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(MultiheadAttentionImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplModuleHolder.java deleted file mode 100644 index a93c125459e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class MultiheadAttentionImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public MultiheadAttentionImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public MultiheadAttentionImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public MultiheadAttentionImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. 
*/ - public native @Name("operator ->") MultiheadAttentionImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") MultiheadAttentionImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) MultiheadAttentionImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native MultiheadAttentionImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionOptions.java index f3cde18238e..b2102cd8b86 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLoss.java deleted file mode 100644 index 08eada224c7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code NLLLossImpl}. - * See the documentation for {@code NLLLossImpl} class to learn what methods it - * provides, and examples of how to use {@code NLLLoss} with - * {@code torch::nn::NLLLossOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NLLLoss extends NLLLossImplModuleHolder { - static { Loader.load(); } - - public NLLLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public NLLLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) NLLLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) NLLLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NLLLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java index 74621fabb5d..ea39ed82cd6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,9 +48,9 @@ public class NLLLossImpl extends NLLLossImplCloneable { } public NLLLossImpl(@ByVal(nullValue = "torch::nn::NLLLossOptions{}") NLLLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::NLLLossOptions{}") NLLLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::NLLLossOptions{}") NLLLossOptions options_); public NLLLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); /** Pretty prints the {@code NLLLoss} module into the given {@code stream}. */ public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java index 00902e2e45d..7d39db10873 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class NLLLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public NLLLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr NLLLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(NLLLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplModuleHolder.java deleted file mode 100644 index f9f9e7962d7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NLLLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NLLLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public NLLLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}.
*/ - /* implicit */ public NLLLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) NLLLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) NLLLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") NLLLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") NLLLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) NLLLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native NLLLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossOptions.java index 28837412779..e6cec91833e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,5 +46,5 @@ public class NLLLossOptions extends Pointer { public native @ByRef @NoException(true) Tensor weight(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer ignore_index(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NameMangler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NameMangler.java index c085196f800..969dce1a4e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NameMangler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NameMangler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedAnyModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedAnyModule.java deleted file mode 100644 index 
2be05ee6c2f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedAnyModule.java +++ /dev/null @@ -1,69 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** Stores a type erased {@code Module} with name. - * - * The {@code NamedAnyModule} class enables the following API for constructing - * {@code nn::Sequential} with named submodules: - * \rst - * .. code-block:: cpp - * - * struct M : torch::nn::Module { - * explicit M(int value_) : value(value_) {} - * int value; - * int forward() { - * return value; - * } - * }; - * - * Sequential sequential({ - * {"m1", std::make_shared(1)}, // shared pointer to {@code Module} is - * supported {std::string("m2"), M(2)}, // {@code Module} is supported - * {"linear1", Linear(10, 3)} // {@code ModuleHolder} is supported - * }); - * \endrst */ -@Namespace("torch::nn") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NamedAnyModule extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NamedAnyModule(Pointer p) { super(p); } - - /** Creates a {@code NamedAnyModule} from a (boxed) {@code Module}. */ - - /** Creates a {@code NamedAnyModule} from a {@code Module}, moving or copying it - * into a {@code shared_ptr} internally. */ - // NOTE: We need to use `std::remove_reference::type` to get rid of - // any reference components for make_unique. - - /** Creates a {@code NamedAnyModule} from a {@code Module} that is unwrapped from - * a {@code ModuleHolder}. */ - - /** Creates a {@code NamedAnyModule} from a type-erased {@code AnyModule}. */ - public NamedAnyModule(@StdString BytePointer name, @ByVal AnyModule any_module) { super((Pointer)null); allocate(name, any_module); } - private native void allocate(@StdString BytePointer name, @ByVal AnyModule any_module); - public NamedAnyModule(@StdString String name, @ByVal AnyModule any_module) { super((Pointer)null); allocate(name, any_module); } - private native void allocate(@StdString String name, @ByVal AnyModule any_module); - - /** Returns a reference to the name. */ - public native @StdString @NoException(true) BytePointer name(); - - /** Returns a reference to the module. */ - public native @ByRef @NoException(true) AnyModule module(); - - /** Returns a const reference to the module. 
*/ -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedBufferPolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedBufferPolicy.java deleted file mode 100644 index 86d1d6f409a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedBufferPolicy.java +++ /dev/null @@ -1,42 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::jit::detail::NamedPolicy") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NamedBufferPolicy extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public NamedBufferPolicy() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NamedBufferPolicy(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NamedBufferPolicy(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public NamedBufferPolicy position(long position) { - return (NamedBufferPolicy)super.position(position); - } - @Override public NamedBufferPolicy getPointer(long i) { - return new NamedBufferPolicy((Pointer)this).offsetAddress(i); - } - - public static native @ByVal @Cast("torch::jit::detail::NamedPolicy::value_type*") NamedJitModule create( - @StdVector SlotCursor cursors, - @ByVal IValue v); - public static native @Cast("bool") boolean valid(@Const @SharedPtr @ByRef ClassType t, @Cast("size_t") long i, @Const @ByRef IValue v); - @MemberGetter public static native @Cast("const bool") boolean all_slots(); - public static final boolean all_slots = all_slots(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java index ec3a21f0699..d3e34d907d8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedAttributePolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValuePolicy.java similarity index 63% rename from pytorch/src/gen/java/org/bytedeco/pytorch/NamedAttributePolicy.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValuePolicy.java index cfbf6417d7f..7ef631e5448 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedAttributePolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedIValuePolicy.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE 
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,27 +18,27 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("torch::jit::detail::NamedPolicy") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NamedAttributePolicy extends Pointer { +public class NamedIValuePolicy extends Pointer { static { Loader.load(); } /** Default native constructor. */ - public NamedAttributePolicy() { super((Pointer)null); allocate(); } + public NamedIValuePolicy() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NamedAttributePolicy(long size) { super((Pointer)null); allocateArray(size); } + public NamedIValuePolicy(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NamedAttributePolicy(Pointer p) { super(p); } + public NamedIValuePolicy(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); - @Override public NamedAttributePolicy position(long position) { - return (NamedAttributePolicy)super.position(position); + @Override public NamedIValuePolicy position(long position) { + return (NamedIValuePolicy)super.position(position); } - @Override public NamedAttributePolicy getPointer(long i) { - return new NamedAttributePolicy((Pointer)this).offsetAddress(i); + @Override public NamedIValuePolicy getPointer(long i) { + return new NamedIValuePolicy((Pointer)this).offsetAddress(i); } public static native @ByVal @Cast("torch::jit::detail::NamedPolicy::value_type*") NamedJitModule create( @StdVector SlotCursor cursors, @ByVal IValue v); - public static native @Cast("bool") boolean valid(@Const @SharedPtr @ByRef ClassType t, @Cast("size_t") long i, @Const @ByRef IValue v); + public static native @Cast("bool") boolean valid(@Const @SharedPtr("c10::ClassType") @ByRef ClassType t, @Cast("size_t") long i, @Const @ByRef IValue v); @MemberGetter public static native @Cast("const bool") boolean all_slots(); public static final boolean all_slots = all_slots(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModule.java index d26de4659c0..7731ca33ec0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModule.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,6 +36,6 @@ public class NamedJitModule extends Pointer { return new NamedJitModule((Pointer)this).offsetAddress(i); } - public native @StdString BytePointer name(); public native NamedJitModule name(BytePointer setter); - public native @ByRef JitModule value(); public native NamedJitModule value(JitModule 
setter); + public native @StdString @NoOffset BytePointer name(); public native NamedJitModule name(BytePointer setter); + public native @ByRef @NoOffset JitModule value(); public native NamedJitModule value(JitModule setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedModulePolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModulePolicy.java similarity index 66% rename from pytorch/src/gen/java/org/bytedeco/pytorch/NamedModulePolicy.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModulePolicy.java index e46f819d791..555c0b766e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedModulePolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedJitModulePolicy.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,27 +22,27 @@ // along with the fully qualified name of that slot. This is used for the named_ // variants like named_parameters(). @Name("torch::jit::detail::NamedPolicy") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NamedModulePolicy extends Pointer { +public class NamedJitModulePolicy extends Pointer { static { Loader.load(); } /** Default native constructor. */ - public NamedModulePolicy() { super((Pointer)null); allocate(); } + public NamedJitModulePolicy() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NamedModulePolicy(long size) { super((Pointer)null); allocateArray(size); } + public NamedJitModulePolicy(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
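 *
 * A hypothetical migration sketch for the rename performed in this hunk; only the
 * Java class name changes, the mapped NamedPolicy behavior does not:
 *
 *   // before: NamedModulePolicy p = new NamedModulePolicy();
 *   NamedJitModulePolicy p = new NamedJitModulePolicy();
 *   boolean allSlots = NamedJitModulePolicy.all_slots;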
*/ - public NamedModulePolicy(Pointer p) { super(p); } + public NamedJitModulePolicy(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); - @Override public NamedModulePolicy position(long position) { - return (NamedModulePolicy)super.position(position); + @Override public NamedJitModulePolicy position(long position) { + return (NamedJitModulePolicy)super.position(position); } - @Override public NamedModulePolicy getPointer(long i) { - return new NamedModulePolicy((Pointer)this).offsetAddress(i); + @Override public NamedJitModulePolicy getPointer(long i) { + return new NamedJitModulePolicy((Pointer)this).offsetAddress(i); } public static native @ByVal @Cast("torch::jit::detail::NamedPolicy::value_type*") NamedJitModule create( @StdVector SlotCursor cursors, @ByVal IValue v); - public static native @Cast("bool") boolean valid(@Const @SharedPtr @ByRef ClassType t, @Cast("size_t") long i, @Const @ByRef IValue v); + public static native @Cast("bool") boolean valid(@Const @SharedPtr("c10::ClassType") @ByRef ClassType t, @Cast("size_t") long i, @Const @ByRef IValue v); @MemberGetter public static native @Cast("const bool") boolean all_slots(); public static final boolean all_slots = all_slots(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java index 97a1fec284f..09bf1bd7203 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensor.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,7 +18,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("torch::jit::Named") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::jit::Named") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class NamedTensor extends Pointer { static { Loader.load(); } /** Default native constructor. 
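 *
 * A small access sketch (assuming this {@code Named} binding exposes the same
 * {@code name()}/{@code value()} accessor pair that {@code NamedJitModule} shows
 * earlier in this diff; the instance is a placeholder):
 *
 *   NamedTensor named = ...;
 *   BytePointer name = named.name();
 *   Tensor tensor = named.value();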
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java index 8c0890d827d..7640c19c132 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java index 3c28daabae9..18506ddaaf7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedParameterPolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorPolicy.java similarity index 63% rename from pytorch/src/gen/java/org/bytedeco/pytorch/NamedParameterPolicy.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorPolicy.java index 1796b9d3563..ce27ce3456b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedParameterPolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorPolicy.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,27 +18,27 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("torch::jit::detail::NamedPolicy") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NamedParameterPolicy extends Pointer { +public class NamedTensorPolicy extends Pointer { static { Loader.load(); } /** Default native constructor. */ - public NamedParameterPolicy() { super((Pointer)null); allocate(); } + public NamedTensorPolicy() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NamedParameterPolicy(long size) { super((Pointer)null); allocateArray(size); } + public NamedTensorPolicy(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public NamedParameterPolicy(Pointer p) { super(p); } + public NamedTensorPolicy(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); - @Override public NamedParameterPolicy position(long position) { - return (NamedParameterPolicy)super.position(position); + @Override public NamedTensorPolicy position(long position) { + return (NamedTensorPolicy)super.position(position); } - @Override public NamedParameterPolicy getPointer(long i) { - return new NamedParameterPolicy((Pointer)this).offsetAddress(i); + @Override public NamedTensorPolicy getPointer(long i) { + return new NamedTensorPolicy((Pointer)this).offsetAddress(i); } public static native @ByVal @Cast("torch::jit::detail::NamedPolicy::value_type*") NamedJitModule create( @StdVector SlotCursor cursors, @ByVal IValue v); - public static native @Cast("bool") boolean valid(@Const @SharedPtr @ByRef ClassType t, @Cast("size_t") long i, @Const @ByRef IValue v); + public static native @Cast("bool") boolean valid(@Const @SharedPtr("c10::ClassType") @ByRef ClassType t, @Cast("size_t") long i, @Const @ByRef IValue v); @MemberGetter public static native @Cast("const bool") boolean all_slots(); public static final boolean all_slots = all_slots(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTupleConstructor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTupleConstructor.java index ee2eb93f0de..aeffdf6450f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTupleConstructor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTupleConstructor.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -25,12 +27,7 @@ public class NamedTupleConstructor extends SugaredValue { public NamedTupleConstructor(@SharedPtr TupleType type) { super((Pointer)null); allocate(type); } private native void allocate(@SharedPtr TupleType type); - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + public native @StdString BytePointer kind(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedType.java index 1b64a994a0c..458bee706e2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValue.java index 5d48699b178..40760d2d092 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueArrayRef.java index c17d3d0e800..c565d0f1f9c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class NamedValueArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public NamedValueArrayRef(@Const @ByRef NamedValue OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef NamedValue OneElt); + /** Construct an ArrayRef from a pointer and length. */ public NamedValueArrayRef(@Const NamedValue data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -70,13 +71,13 @@ public class NamedValueArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") NamedValue begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") NamedValue end(); + public native @Const @ByPtr NamedValue begin(); + public native @Const @ByPtr NamedValue end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") NamedValue cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") NamedValue cend(); + public native @Const @ByPtr NamedValue cbegin(); + public native @Const @ByPtr NamedValue cend(); /** empty - Check if the array is empty. 
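 *
 * A minimal bounds sketch for the retyped iterators above, which now return plain
 * {@code @ByPtr NamedValue} pointers (the {@code Loader.sizeof} arithmetic is an
 * assumption about typical JavaCPP usage; {@code args} is a placeholder):
 *
 *   NamedValue first = args.begin();   // first element
 *   NamedValue limit = args.end();     // one past the last element
 *   long n = (limit.address() - first.address()) / Loader.sizeof(NamedValue.class);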
*/ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueOptional.java index f49d705f9e2..1cc50097c00 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedValueOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class NamedValueOptional extends Pointer { public native @Name("operator =") @ByRef NamedValueOptional put(@ByRef NamedValueOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef NamedValue get(); @ValueSetter public native NamedValueOptional put(@ByRef NamedValue value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamesMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamesMode.java index 62b296c05ef..7e10ddf078b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamesMode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamesMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NativeResolver.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NativeResolver.java index 54a797d65e7..c7f481468fd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NativeResolver.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NativeResolver.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,11 +37,11 @@ public class NativeResolver extends Resolver { return new NativeResolver((Pointer)this).offsetAddress(i); } - public native @SharedPtr @ByVal SugaredValue resolveValue( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue resolveValue( @StdString BytePointer name, @ByRef GraphFunction m, @Const @ByRef SourceRange loc); - public native @SharedPtr @ByVal SugaredValue resolveValue( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue resolveValue( @StdString String name, @ByRef GraphFunction m, @Const @ByRef SourceRange loc); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java index 4d03b74a787..7f879309617 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NestedTensorImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoGradGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoGradGuard.java index 3aee5ad3ad0..5d1ba7968e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoGradGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoGradGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoNamesGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoNamesGuard.java index 9e73487a109..d63f99fd5a7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoNamesGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoNamesGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoTF32Guard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoTF32Guard.java index 46b3dade7f9..d9bfc7209e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoTF32Guard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoTF32Guard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoTarget.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoTarget.java index 770bbdf3895..671463dfa80 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoTarget.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoTarget.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import 
org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoTracerDispatchMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoTracerDispatchMode.java deleted file mode 100644 index 1ed0d28fe1a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoTracerDispatchMode.java +++ /dev/null @@ -1,37 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("at::tracer::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NoTracerDispatchMode extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public NoTracerDispatchMode() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public NoTracerDispatchMode(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NoTracerDispatchMode(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public NoTracerDispatchMode position(long position) { - return (NoTracerDispatchMode)super.position(position); - } - @Override public NoTracerDispatchMode getPointer(long i) { - return new NoTracerDispatchMode((Pointer)this).offsetAddress(i); - } - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoWarn.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoWarn.java deleted file mode 100644 index 782029b7f71..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoWarn.java +++ /dev/null @@ -1,38 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit::tracer") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NoWarn extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NoWarn(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public NoWarn(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public NoWarn position(long position) { - return (NoWarn)super.position(position); - } - @Override public NoWarn getPointer(long i) { - return new NoWarn((Pointer)this).offsetAddress(i); - } - - public NoWarn() { super((Pointer)null); allocate(); } - private native void allocate(); - public native @SharedPtr TracingState state(); public native NoWarn state(TracingState setter); - public native @Cast("bool") boolean prev(); public native NoWarn prev(boolean setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java index 033b693b1bc..194657a7a85 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -83,7 +85,7 @@ public class Node extends Pointer { public native @SharedPtr Node getptr(); /** Evaluates the function on the given inputs and returns the result of the * function call. */ - public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply(@Cast({"", "std::vector"}) @StdMove TensorVector inputs); + public native @Name("operator ()") @Cast({"", "std::vector"}) @StdMove TensorVector apply(@Cast({"", "std::vector"}) @StdMove TensorVector inputs); // Graph Connectivity API //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -103,7 +105,7 @@ public class Node extends Pointer { * of the new input. 
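 *
 * A hypothetical call sketch for the retyped overload below, where
 * {@code SymIntArrayRef} replaces the old {@code SymIntRef} alias
 * (node, options and shape are placeholders):
 *
 *   int index = node.add_input_metadata(options, shape, false); // is_tensor_subclass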
*/ public native @Cast("uint32_t") @NoException(true) int add_input_metadata( @Const @ByRef TensorOptions options, - @ByVal SymIntRef shape, + @ByVal SymIntArrayRef shape, @Cast("bool") boolean is_tensor_subclass); public native @Cast("uint32_t") @NoException(true) int add_input_metadata(@Const @ByRef Tensor t); @@ -113,8 +115,6 @@ public class Node extends Pointer { public native @Cast("uint32_t") @NoException(true) int num_inputs(); - public native @Const @ByRef InputMetadata input_metadata(@Cast("size_t") long index); - /** * Note: Function Streams * A function's stream (for a given device type) is the stream of the first @@ -277,7 +277,7 @@ public native void add_retains_grad_hook( public native @ByRef @NoException(true) FunctionPreHookVector tensor_pre_hooks(); - public native @ByRef @NoException(true) IntFunctionPreHookMap retains_grad_hooks(); + // Customization Points for Subclasses //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IntFunctionPreHookMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeIntMap.java similarity index 64% rename from pytorch/src/gen/java/org/bytedeco/pytorch/IntFunctionPreHookMap.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/NodeIntMap.java index 6cb1b136928..ed7622fa1b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IntFunctionPreHookMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeIntMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,20 +17,22 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("std::unordered_map >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class IntFunctionPreHookMap extends Pointer { +@Name("std::unordered_map") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class NodeIntMap extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public IntFunctionPreHookMap(Pointer p) { super(p); } - public IntFunctionPreHookMap() { allocate(); } + public NodeIntMap(Pointer p) { super(p); } + public NodeIntMap() { allocate(); } private native void allocate(); - + public native @Name("operator =") @ByRef NodeIntMap put(@ByRef NodeIntMap x); public boolean empty() { return size() == 0; } public native long size(); - @Index(function = "at") public native @UniquePtr @Cast({"", "std::unique_ptr&&"}) FunctionPreHook get(int i); + @Index public native int get(Node i); + public native NodeIntMap put(Node i, int value); + public native void erase(@ByVal Iterator pos); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @NoOffset @Name("iterator") public static class Iterator extends Pointer { @@ -37,8 +41,8 @@ public Iterator() { } public native @Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *().first") @MemberGetter int first(); - public native @Name("operator *().second") @MemberGetter @UniquePtr @Cast({"", "std::unique_ptr&&"}) FunctionPreHook second(); + public native @Name("operator *().first") @MemberGetter @Const Node first(); + public native @Name("operator *().second") @MemberGetter int second(); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TokenTrieVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSet.java similarity index 61% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TokenTrieVector.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/NodeSet.java index 5033980aeae..1847f3d7432 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TokenTrieVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSet.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,20 +17,21 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TokenTrieVector extends Pointer { +@Name("std::unordered_set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class NodeSet extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
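 *
 * A minimal usage sketch for this new set binding (the {@code Node} value is a
 * placeholder):
 *
 *   NodeSet seen = new NodeSet();
 *   seen.insert(node);
 *   if (!seen.empty()) { Node first = seen.front(); }
 *   seen.erase(node);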
*/ - public TokenTrieVector(Pointer p) { super(p); } - public TokenTrieVector() { allocate(); } + public NodeSet(Pointer p) { super(p); } + public NodeSet() { allocate(); } private native void allocate(); - + public native @Name("operator =") @ByRef NodeSet put(@ByRef NodeSet x); public boolean empty() { return size() == 0; } public native long size(); - @Index(function = "at") public native @UniquePtr TokenTrie get(@Cast("size_t") long i); - + public Node front() { try (Iterator it = begin()) { return it.get(); } } + public native void insert(Node value); + public native void erase(Node value); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @NoOffset @Name("iterator") public static class Iterator extends Pointer { @@ -37,7 +40,7 @@ public Iterator() { } public native @Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @UniquePtr @Const TokenTrie get(); + public native @Name("operator *") @Const Node get(); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Suspend.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorBase.java similarity index 53% rename from pytorch/src/gen/java/org/bytedeco/pytorch/Suspend.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorBase.java index a7390304b08..8fa0aa80cbb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Suspend.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,15 +17,13 @@ import static org.bytedeco.pytorch.global.torch.*; - -// Created by wait() -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Suspend extends Pointer { +@Name("c10::SmallVectorTemplateBase") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class NodeSmallVectorBase extends NodeSmallVectorCommon { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
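 *
 * A small sketch of the push/pop API below (the vector instance is a placeholder;
 * in practice it is usually obtained as a {@code NodeSmallVectorImpl}):
 *
 *   NodeSmallVectorBase vec = ...;
 *   vec.push_back(node);
 *   vec.pop_back();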
*/ - public Suspend(Pointer p) { super(p); } + public NodeSmallVectorBase(Pointer p) { super(p); } - public native @NoException(true) @Cast("const char*") BytePointer what(); + public native void push_back(@ByPtrRef Node Elt); - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + public native void pop_back(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorCommon.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorCommon.java new file mode 100644 index 00000000000..4cf04304a22 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorCommon.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::SmallVectorTemplateCommon") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class NodeSmallVectorCommon extends IntSizedSmallVectorBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public NodeSmallVectorCommon(Pointer p) { super(p); } + + + // forward iterator creation methods. + public native @ByVal @Cast("c10::SmallVectorTemplateCommon::iterator*") Node begin(); + public native @ByVal @Cast("c10::SmallVectorTemplateCommon::iterator*") Node end(); + + // reverse iterator creation methods. + + public native long size_in_bytes(); + public native long max_size(); + + public native @Cast("size_t") long capacity_in_bytes(); + + /** Return a pointer to the vector's buffer, even if empty(). */ + public native @ByVal @Cast("c10::SmallVectorTemplateCommon::pointer*") Node data(); + /** Return a pointer to the vector's buffer, even if empty(). */ + + // SmallVector::at is NOT from LLVM. 
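+ // Hypothetical usage sketch, not generated code: begin()/end() surface the raw
+ // C++ iterators as Node pointers, so indexed access from Java normally goes
+ // through at()/get() instead, e.g. (assuming a vector obtained elsewhere):
+ //   NodeSmallVectorImpl nodes = ...;          // any SmallVector<Node*> binding
+ //   for (long i = 0; i < nodes.size(); i++) {
+ //     Node n = nodes.get(i);                  // operator[]; at(i) adds a bounds check
+ //   }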
+ public native @ByPtr Node at(long idx); + public native @Name("operator []") @ByPtr Node get(long idx); + + public native @ByPtr Node front(); + + public native @ByPtr Node back(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorImpl.java new file mode 100644 index 00000000000..3e30e7be69d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NodeSmallVectorImpl.java @@ -0,0 +1,71 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::SmallVectorImpl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class NodeSmallVectorImpl extends NodeSmallVectorBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public NodeSmallVectorImpl(Pointer p) { super(p); } + + + + public native void clear(); + public native void resize(long N); + + /** Like resize, but \ref T is POD, the new values won't be initialized. */ + public native void resize_for_overwrite(long N); + + public native void resize(long N, @ByPtr Node NV); + + public native void reserve(long N); + + public native void pop_back_n(long NumItems); + + public native Node pop_back_val(); + + public native void swap(@ByRef NodeSmallVectorImpl RHS); + + /** Add the specified range to the end of the SmallVector. */ + + /** Append \p NumInputs copies of \p Elt to the end. */ + public native void append(long NumInputs, @ByPtr Node Elt); + + public native void append(@Const @ByRef NodeSmallVectorImpl RHS); + + public native void assign(long NumElts, @ByPtr Node Elt); + + // FIXME: Consider assigning over existing elements, rather than clearing & + // re-initializing them - for all assign(...) variants. 
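+ // Hypothetical sketch of the mutating API from Java, for illustration only
+ // (reserve/append/assign are the bindings declared in this class):
+ //   NodeSmallVectorImpl dst = ...;   // assumed to be obtained from native code
+ //   dst.reserve(16);                 // pre-allocate capacity for 16 elements
+ //   dst.append(3, node);             // append three copies of a Node*
+ //   dst.assign(src);                 // replace dst's contents with src's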
+ + public native void assign(@Const @ByRef NodeSmallVectorImpl RHS); + + public native @ByVal @Cast("c10::SmallVectorImpl::iterator*") Node erase(@ByVal @Cast("c10::SmallVectorImpl::const_iterator*") Node CI); + + public native @ByVal @Cast("c10::SmallVectorImpl::iterator*") Node erase(@ByVal @Cast("c10::SmallVectorImpl::const_iterator*") Node CS, @ByVal @Cast("c10::SmallVectorImpl::const_iterator*") Node CE); + public native @ByVal @Cast("c10::SmallVectorImpl::iterator*") Node insert(@ByVal @Cast("c10::SmallVectorImpl::iterator*") Node I, Node Elt); + + public native @ByVal @Cast("c10::SmallVectorImpl::iterator*") Node insert(@ByVal @Cast("c10::SmallVectorImpl::iterator*") Node I, long NumToInsert, @ByPtr Node Elt); + + public native @ByRef @Name("operator =") NodeSmallVectorImpl put(@Const @ByRef NodeSmallVectorImpl RHS); + + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef NodeSmallVectorImpl RHS); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef NodeSmallVectorImpl RHS); + + public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef NodeSmallVectorImpl RHS); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoneType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoneType.java index cfca4ed7e16..e86b2fd45a8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoneType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoneType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NoneTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NoneTypePtr.java index f143dfc4853..590973fa0f6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NoneTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NoneTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Nonlinearity.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Nonlinearity.java new file mode 100644 index 00000000000..3a0afbd171a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Nonlinearity.java @@ -0,0 +1,74 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static 
org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class Nonlinearity extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Nonlinearity(Pointer p) { super(p); } + public Nonlinearity(kLinear value) { this(); put(value); } + public Nonlinearity(kConv1D value) { this(); put(value); } + public Nonlinearity(kConv2D value) { this(); put(value); } + public Nonlinearity(kConv3D value) { this(); put(value); } + public Nonlinearity(kConvTranspose1D value) { this(); put(value); } + public Nonlinearity(kConvTranspose2D value) { this(); put(value); } + public Nonlinearity(kConvTranspose3D value) { this(); put(value); } + public Nonlinearity(kSigmoid value) { this(); put(value); } + public Nonlinearity(kTanh value) { this(); put(value); } + public Nonlinearity(kReLU value) { this(); put(value); } + public Nonlinearity(kLeakyReLU value) { this(); put(value); } + public Nonlinearity() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef Nonlinearity put(@ByRef Nonlinearity x); + + public @ByRef kLinear get0() { return get0(this); } + @Namespace @Name("c10::get<0>") public static native @ByRef kLinear get0(@ByRef Nonlinearity container); + @ValueSetter public native Nonlinearity put(@ByRef kLinear value); + public @ByRef kConv1D get1() { return get1(this); } + @Namespace @Name("c10::get<1>") public static native @ByRef kConv1D get1(@ByRef Nonlinearity container); + @ValueSetter public native Nonlinearity put(@ByRef kConv1D value); + public @ByRef kConv2D get2() { return get2(this); } + @Namespace @Name("c10::get<2>") public static native @ByRef kConv2D get2(@ByRef Nonlinearity container); + @ValueSetter public native Nonlinearity put(@ByRef kConv2D value); + public @ByRef kConv3D get3() { return get3(this); } + @Namespace @Name("c10::get<3>") public static native @ByRef kConv3D get3(@ByRef Nonlinearity container); + @ValueSetter public native Nonlinearity put(@ByRef kConv3D value); + public @ByRef kConvTranspose1D get4() { return get4(this); } + @Namespace @Name("c10::get<4>") public static native @ByRef kConvTranspose1D get4(@ByRef Nonlinearity container); + @ValueSetter public native Nonlinearity put(@ByRef kConvTranspose1D value); + public @ByRef kConvTranspose2D get5() { return get5(this); } + @Namespace @Name("c10::get<5>") public static native @ByRef kConvTranspose2D get5(@ByRef Nonlinearity container); + @ValueSetter public native Nonlinearity put(@ByRef kConvTranspose2D value); + public @ByRef kConvTranspose3D get6() { return get6(this); } + @Namespace @Name("c10::get<6>") public static native @ByRef kConvTranspose3D get6(@ByRef Nonlinearity container); + @ValueSetter public native Nonlinearity put(@ByRef kConvTranspose3D value); + public @ByRef kSigmoid get7() { return get7(this); } + @Namespace @Name("c10::get<7>") public static native @ByRef kSigmoid get7(@ByRef Nonlinearity container); + @ValueSetter public native Nonlinearity put(@ByRef kSigmoid value); + public @ByRef kTanh get8() { return get8(this); } + @Namespace @Name("c10::get<8>") public static native @ByRef kTanh get8(@ByRef Nonlinearity container); + @ValueSetter public native Nonlinearity put(@ByRef kTanh value); + public @ByRef kReLU get9() { return get9(this); } + @Namespace @Name("c10::get<9>") public static native @ByRef kReLU get9(@ByRef Nonlinearity container); 
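+ // Hypothetical usage sketch: a Nonlinearity is usually built from one of the
+ // enum tag singletons and read back via the getN() accessor whose index matches
+ // the active alternative (reading a non-active alternative fails at runtime):
+ //   Nonlinearity n = new Nonlinearity(new kReLU());  // alternative 9 of this variant
+ //   kReLU tag = n.get9();                            // ok while kReLU is active
+ //   n.put(new kTanh());                              // switch to alternative 8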
+ @ValueSetter public native Nonlinearity put(@ByRef kReLU value); + public @ByRef kLeakyReLU get10() { return get10(this); } + @Namespace @Name("c10::get<10>") public static native @ByRef kLeakyReLU get10(@ByRef Nonlinearity container); + @ValueSetter public native Nonlinearity put(@ByRef kLeakyReLU value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NonlinearityType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NonlinearityType.java deleted file mode 100644 index 481e9e80ac5..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NonlinearityType.java +++ /dev/null @@ -1,72 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class NonlinearityType extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public NonlinearityType(Pointer p) { super(p); } - public NonlinearityType(kLinear value) { this(); put(value); } - public NonlinearityType(kConv1D value) { this(); put(value); } - public NonlinearityType(kConv2D value) { this(); put(value); } - public NonlinearityType(kConv3D value) { this(); put(value); } - public NonlinearityType(kConvTranspose1D value) { this(); put(value); } - public NonlinearityType(kConvTranspose2D value) { this(); put(value); } - public NonlinearityType(kConvTranspose3D value) { this(); put(value); } - public NonlinearityType(kSigmoid value) { this(); put(value); } - public NonlinearityType(kTanh value) { this(); put(value); } - public NonlinearityType(kReLU value) { this(); put(value); } - public NonlinearityType(kLeakyReLU value) { this(); put(value); } - public NonlinearityType() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef NonlinearityType put(@ByRef NonlinearityType x); - - public @ByRef kLinear get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kLinear get0(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType put(@ByRef kLinear value); - public @ByRef kConv1D get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kConv1D get1(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType put(@ByRef kConv1D value); - public @ByRef kConv2D get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kConv2D get2(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType put(@ByRef kConv2D value); - public @ByRef kConv3D get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kConv3D get3(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType put(@ByRef kConv3D value); - public @ByRef kConvTranspose1D get4() { return get4(this); } - @Namespace @Name("c10::get<4>") public static native @ByRef kConvTranspose1D get4(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType 
put(@ByRef kConvTranspose1D value); - public @ByRef kConvTranspose2D get5() { return get5(this); } - @Namespace @Name("c10::get<5>") public static native @ByRef kConvTranspose2D get5(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType put(@ByRef kConvTranspose2D value); - public @ByRef kConvTranspose3D get6() { return get6(this); } - @Namespace @Name("c10::get<6>") public static native @ByRef kConvTranspose3D get6(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType put(@ByRef kConvTranspose3D value); - public @ByRef kSigmoid get7() { return get7(this); } - @Namespace @Name("c10::get<7>") public static native @ByRef kSigmoid get7(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType put(@ByRef kSigmoid value); - public @ByRef kTanh get8() { return get8(this); } - @Namespace @Name("c10::get<8>") public static native @ByRef kTanh get8(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType put(@ByRef kTanh value); - public @ByRef kReLU get9() { return get9(this); } - @Namespace @Name("c10::get<9>") public static native @ByRef kReLU get9(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType put(@ByRef kReLU value); - public @ByRef kLeakyReLU get10() { return get10(this); } - @Namespace @Name("c10::get<10>") public static native @ByRef kLeakyReLU get10(@ByRef NonlinearityType container); - @ValueSetter public native NonlinearityType put(@ByRef kLeakyReLU value); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NormalizeFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NormalizeFuncOptions.java index a0a77db97b6..a235e85335e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NormalizeFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NormalizeFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NotImplementedError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NotImplementedError.java index df6ba88e0d5..90e40cdc20a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NotImplementedError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NotImplementedError.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NumberType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NumberType.java index df19fb50f2f..a719763c3d6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NumberType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NumberType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -38,5 +40,5 @@ public class NumberType extends Type { public native @StdString BytePointer str(); @MemberGetter public static native TypeKind Kind(); // global singleton - public static native @ByVal NumberTypePtr get(); + public static native @ByVal @Name("get") NumberTypePtr getNumberIntTypePtr(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NumberTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NumberTypePtr.java index d14a0627338..25a98d4b00f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NumberTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NumberTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksArgs.java new file mode 100644 index 00000000000..cde750fc8fb --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksArgs.java @@ -0,0 +1,29 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// NB: dummy argument to suppress "ISO C++11 requires at least one argument +// for the "..." in a variadic macro" +@Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ORTHooksArgs extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public ORTHooksArgs() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public ORTHooksArgs(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksInterface.java new file mode 100644 index 00000000000..a14fdc7cb52 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ORTHooksInterface.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// NB: Class must live in `at` due to limitations of Registry.h. + +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ORTHooksInterface extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public ORTHooksInterface() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ORTHooksInterface(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ORTHooksInterface(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public ORTHooksInterface position(long position) { + return (ORTHooksInterface)super.position(position); + } + @Override public ORTHooksInterface getPointer(long i) { + return new ORTHooksInterface((Pointer)this).offsetAddress(i); + } + + // This should never actually be implemented, but it is used to + // squelch -Werror=non-virtual-dtor + + public native @StdString BytePointer showConfig(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OnnxfiBackendSystemError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OnnxfiBackendSystemError.java index b113540bb3d..b83697817c8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OnnxfiBackendSystemError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OnnxfiBackendSystemError.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OpRegistrationListener.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OpRegistrationListener.java index dcd964adce8..0ad97b93473 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OpRegistrationListener.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OpRegistrationListener.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OpTableOffsetAndMask.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OpTableOffsetAndMask.java deleted file mode 100644 index 9ad07c3b5ef..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OpTableOffsetAndMask.java +++ /dev/null @@ -1,39 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class OpTableOffsetAndMask extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public OpTableOffsetAndMask() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public OpTableOffsetAndMask(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public OpTableOffsetAndMask(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public OpTableOffsetAndMask position(long position) { - return (OpTableOffsetAndMask)super.position(position); - } - @Override public OpTableOffsetAndMask getPointer(long i) { - return new OpTableOffsetAndMask((Pointer)this).offsetAddress(i); - } - - public native @Cast("uint16_t") short offset(); public native OpTableOffsetAndMask offset(short setter); - public native @Cast("uint16_t") short backend_mask(); public native OpTableOffsetAndMask backend_mask(short setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OpaqueOptionalTensorRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OpaqueOptionalTensorRef.java new file mode 100644 index 00000000000..ccb53cba6a8 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OpaqueOptionalTensorRef.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// Storage for a non-owning Tensor, without needing to include Tensor.h +@Namespace("at::internal") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class OpaqueOptionalTensorRef extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public OpaqueOptionalTensorRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public OpaqueOptionalTensorRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public OpaqueOptionalTensorRef position(long position) { + return (OpaqueOptionalTensorRef)super.position(position); + } + @Override public OpaqueOptionalTensorRef getPointer(long i) { + return new OpaqueOptionalTensorRef((Pointer)this).offsetAddress(i); + } + + public OpaqueOptionalTensorRef() { super((Pointer)null); allocate(); } + private native void allocate(); + + public native OptionalTensorRef get(); + + public native @ByRef @Name("operator *") OptionalTensorRef multiply(); + public native @Name("operator ->") OptionalTensorRef access(); + + public native @Const @ByRef Tensor getTensor(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java new file mode 100644 index 00000000000..7fd6d89ac87 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperandInfo.java @@ -0,0 +1,95 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + // namespace internal + +@Namespace("at") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class OperandInfo extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public OperandInfo(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public OperandInfo(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public OperandInfo position(long position) { + return (OperandInfo)super.position(position); + } + @Override public OperandInfo getPointer(long i) { + return new OperandInfo((Pointer)this).offsetAddress(i); + } + + public OperandInfo() { super((Pointer)null); allocate(); } + private native void allocate(); + public OperandInfo(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned t) { super((Pointer)null); allocate(t); } + private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned t); + + /** Stride after broadcasting. The stride is in bytes, not number of elements. */ + public native @ByRef @Cast("at::OperandInfo::StrideVector*") SymDimVector stride_bytes(); public native OperandInfo stride_bytes(SymDimVector setter); + + /** The desired device and type for the operand. For inputs, this specifies + * that the input should be converted to this type if necessary. For outputs, + * this specifies which type to allocate. 
target_dtype and device are + * initialized with the dtype and device of the tensor but during type + * promotion target_dtype value can become different from tensor's dtype + * also, during type promotion target_dtype and device can be set for an + * undefined tensor so that tensor can be properly constructed later. */ + public native @ByRef DeviceOptional device(); public native OperandInfo device(DeviceOptional setter); + public native ScalarType target_dtype(); public native OperandInfo target_dtype(ScalarType setter); + // Caches dtype of the tensor, because scalar_type is an expensive operation + // If dtype of the tensor is changed (e.g. as a result of type promotion or in + // allocate_outputs), this + // value should be changed too. + public native ScalarType current_dtype(); public native OperandInfo current_dtype(ScalarType setter); + + public native @Cast("bool") boolean is_device_defined(); + public native @Cast("bool") boolean is_type_defined(); + public native @ByVal TensorOptions options(); + + /** The data pointer. This may be different from tensor->data_ptr() if the + * iterator is split. */ + public native Pointer data(); public native OperandInfo data(Pointer setter); + + public native @Cast("bool") boolean is_output(); public native OperandInfo is_output(boolean setter); + + public native @Cast("bool") boolean will_resize(); public native OperandInfo will_resize(boolean setter); + + public native @Cast("bool") boolean is_read_write(); public native OperandInfo is_read_write(boolean setter); + + public native void validate(); + + /** The tensor operand. Note that the strides, data pointer, and + * other attributes may differ due to dimension reordering and + * coalescing. */ + public native @Const @ByRef Tensor tensor(); + public native @Const @ByRef TensorBase tensor_base(); + public native void tensor(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned tensor); + + // Save the original tensor operand in cases when an output is modified + // (e.g. 
if dtype is changed) + public native @Const @ByRef Tensor original_tensor(); + public native @Const @ByRef TensorBase original_tensor_base(); + + // Set tensor to a new value, and store the old tensor value in + // original_tensor Should only ever be called once for the lifetime of an + // operand + public native void exchange_tensor(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned new_tensor); + + // Move original_tensor back into tensor, exchange_tensor must have been + // called before + public native void restore_original_tensor(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Operation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Operation.java index e1494c4dc0c..3c5694528b1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Operation.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Operation.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperationCreator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperationCreator.java index f84d1b1975e..ecfe9fdf858 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperationCreator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperationCreator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Operator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Operator.java index da1476f4a45..0a218cf6cea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Operator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Operator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,7 +37,7 @@ // An Operator is a thin wrapper around either a pure JIT operator (e.g. prim // ops) or a c10 operator, allowing some common operations and abstracting away // the concrete operator nature. -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Operator extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -55,7 +57,7 @@ public class Operator extends Pointer { public native @Const @ByRef FunctionSchema schema(); - public native @ByVal @Cast("c10::ArrayRef*") IntArrayRef getTags(); + public native @ByVal TagArrayRef getTags(); public native @Cast("bool") boolean isC10Op(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java index 7c5309a4102..4b714bbc142 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -54,7 +56,7 @@ public class OperatorHandle extends Pointer { public native void checkInvariants(); - public native @ByVal @Cast("c10::ArrayRef*") IntArrayRef getTags(); + public native @ByVal TagArrayRef getTags(); public native @Cast("bool") boolean hasTag(Tag tag); public native @Cast("bool") boolean hasTag(@Cast("at::Tag") int tag); @@ -66,7 +68,7 @@ public class OperatorHandle extends Pointer { public native void redispatchBoxed(@ByVal DispatchKeySet ks, @Cast("c10::Stack*") IValueVector stack); - + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef OperatorHandle other); - + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef OperatorHandle other); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandleOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandleOptional.java index 0e30ab1c9bd..071517577ea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandleOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandleOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class OperatorHandleOptional extends Pointer { public native @Name("operator =") @ByRef OperatorHandleOptional put(@ByRef OperatorHandleOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef OperatorHandle get(); @ValueSetter public native OperatorHandleOptional put(@ByRef OperatorHandle value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorKernel.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorKernel.java index a0f8732dbac..001743fee74 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorKernel.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorKernel.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import 
org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -14,11 +16,41 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; - // TODO Instead of this, move torch::jit::Stack to the c10 namespace. -@Namespace("c10") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) + + +/** + * Inherit from OperatorKernel to implement a c10 kernel. + * + * Example: + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * + * The kernel class is allowed to have members but these are equivalent + * to global variables. The kernel implementation is responsible for + * preventing race conditions on them. + * + * See below for how to register this kernel with PyTorch. + */ +@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class OperatorKernel extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public OperatorKernel() { super((Pointer)null); } + static { Loader.load(); } + /** Default native constructor. */ + public OperatorKernel() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public OperatorKernel(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public OperatorKernel(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public OperatorKernel position(long position) { + return (OperatorKernel)super.position(position); + } + @Override public OperatorKernel getPointer(long i) { + return new OperatorKernel((Pointer)this).offsetAddress(i); + } + } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java index 8bf8905dd0d..ca743c87f48 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorName.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameOptional.java index d618e816e68..261bbac5ad5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import 
org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class OperatorNameOptional extends Pointer { public native @Name("operator =") @ByRef OperatorNameOptional put(@ByRef OperatorNameOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef OperatorName get(); @ValueSetter public native OperatorNameOptional put(@ByRef OperatorName value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameView.java deleted file mode 100644 index 1007e427bbd..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorNameView.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// Non-owning view of an OperatorName. Unlike OperatorName, most of -// its functions are constexpr, so it can be used for compile time -// computations -@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class OperatorNameView extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public OperatorNameView(Pointer p) { super(p); } - - public native @ByRef @Cast("c10::string_view*") Pointer name(); public native OperatorNameView name(Pointer setter); - public native @ByRef @Cast("c10::string_view*") Pointer overload_name(); public native OperatorNameView overload_name(Pointer setter); - // Parses strings like "foo.overload" and also "foo" - public static native @Const @ByVal OperatorNameView parse(@ByVal @Cast("c10::string_view*") Pointer full_name); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptional.java index fd1f1409d77..eb12b5e8e1e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class OperatorOptional extends Pointer { public native @Name("operator =") @ByRef OperatorOptional put(@ByRef OperatorOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Operator get(); @ValueSetter public native OperatorOptional put(@ByRef Operator value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptionalVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptionalVector.java index 0fe6bbb7ec7..2f4f89bbea6 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptionalVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorOptionalVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class OperatorOptionalVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public OperatorOptional front() { return get(0); } + public OperatorOptional back() { return get(size() - 1); } @Index(function = "at") public native @ByRef OperatorOptional get(@Cast("size_t") long i); public native OperatorOptionalVector put(@Cast("size_t") long i, OperatorOptional value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorSet.java index cac884e08d9..20cecaaaca3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorSet.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorVector.java index 45ad958126d..78208f3b8e8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,10 +35,12 @@ public class OperatorVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @SharedPtr Operator get(@Cast("size_t") long i); + public Operator front() { return get(0); } + public Operator back() { return get(size() - 1); } + @Index(function = "at") public native @SharedPtr("torch::jit::Operator") Operator get(@Cast("size_t") long i); public native OperatorVector put(@Cast("size_t") long i, Operator value); - public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr Operator value); + public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("torch::jit::Operator") Operator value); public native @ByVal Iterator erase(@ByVal Iterator pos); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @@ -46,7 +50,7 @@ public Iterator() { } public native 
@Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @SharedPtr @Const Operator get(); + public native @Name("operator *") @SharedPtr("torch::jit::Operator") @Const Operator get(); } public Operator[] get() { diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java index aba26977bae..2b5d8611908 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Optimizer.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -31,11 +33,11 @@ public class Optimizer extends Pointer { /** Adds the given param_group to the optimizer's param_group list. */ public native void add_param_group(@Const @ByRef OptimizerParamGroup param_group); /** A loss function closure, which is expected to return the loss value. */ - public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") @Cast("torch::optim::Optimizer::LossClosure*") Pointer closure); + public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure); public native @ByVal Tensor step(); /** Adds the given vector of parameters to the optimizer's parameter list. */ - public native void add_parameters(@Cast({"", "std::vector"}) @StdMove TensorVector parameters); + public native void add_parameters(@Cast({"", "std::vector"}) @StdMove TensorVector parameters); /** Zeros out the gradients of all parameters. 
*/ public native void zero_grad(@Cast("bool") boolean set_to_none/*=true*/); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradOptions.java index d72ef44900a..287ee162fae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradParamState.java index 0d793a7cbb3..c4b5332205a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdagradParamState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamOptions.java index 95b3c6a2bc2..5d9828c5721 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamParamState.java index 5409ee25fc2..099290df64f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamParamState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWOptions.java index 9279c91a66f..73f5a646a40 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWParamState.java index 24ee4e28672..e2b4de59d0e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableAdamWParamState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSOptions.java index 99d50f5acce..dbbba472281 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSParamState.java index 2b1f1cd85fe..0a260259983 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableLBFGSParamState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropOptions.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropOptions.java index 0a8c03d4f54..f1d59ebdfd7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropParamState.java index e56a240c200..baa223a011b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableRMSpropParamState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDOptions.java index 9142efe72f2..e8be0309def 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDParamState.java index beee88080be..b0c7f9b5f3f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerCloneableSGDParamState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java index 9222d86e077..0ec08f22d91 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java index 3bc1b91d00c..7c35cfbf859 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -28,13 +30,13 @@ public class OptimizerParamGroup extends Pointer { // be copy-constructible. public OptimizerParamGroup(@Const @ByRef OptimizerParamGroup param_group) { super((Pointer)null); allocate(param_group); } private native void allocate(@Const @ByRef OptimizerParamGroup param_group); - public OptimizerParamGroup(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); + public OptimizerParamGroup(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } + private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); public OptimizerParamGroup( - @Cast({"", "std::vector"}) @StdMove TensorVector params, + @Cast({"", "std::vector"}) @StdMove TensorVector params, @UniquePtr OptimizerOptions options) { super((Pointer)null); allocate(params, options); } private native void allocate( - @Cast({"", "std::vector"}) @StdMove TensorVector params, + @Cast({"", "std::vector"}) @StdMove TensorVector params, @UniquePtr OptimizerOptions options); public native @Cast("bool") boolean has_options(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroupVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroupVector.java index f1a440de423..3205934e271 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroupVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroupVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,8 @@ public class OptimizerParamGroupVector extends Pointer { public boolean empty() { 
return size() == 0; } public native long size(); + public OptimizerParamGroup front() { return get(0); } + public OptimizerParamGroup back() { return get(size() - 1); } @Index(function = "at") public native @ByRef OptimizerParamGroup get(@Cast("size_t") long i); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java index f496283274f..fbc4240869d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java index d69eb6486b9..1f3e75245d7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -90,10 +92,10 @@ public class OptionalDeviceGuard extends Pointer { /** Constructor for testing only. */ public OptionalDeviceGuard( @ByVal Device device, - @Cast("const c10::impl::DeviceGuardImplInterface*") Pointer impl) { super((Pointer)null); allocate(device, impl); } + @Const DeviceGuardImplInterface impl) { super((Pointer)null); allocate(device, impl); } private native void allocate( @ByVal Device device, - @Cast("const c10::impl::DeviceGuardImplInterface*") Pointer impl); + @Const DeviceGuardImplInterface impl); /** Copy is disallowed */ @@ -113,7 +115,7 @@ private native void allocate( /** For testing only */ public native void reset_device( @ByVal Device device, - @Cast("const c10::impl::DeviceGuardImplInterface*") Pointer impl); + @Const DeviceGuardImplInterface impl); /** Returns the device that was set at the time the guard was constructed. 
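Since OptionalDeviceGuard is an RAII guard and JavaCPP's Pointer implements AutoCloseable, a try-with-resources block can scope it from Java. A small hypothetical sketch follows; the one-argument guard construction from a Device is assumed (only the testing-only overload appears in the hunk above), and close() triggers the native destructor, which restores the prior device.

    import org.bytedeco.pytorch.*;

    public class DeviceGuardSketch {
        // Runs `work` with `device` current, then restores the previous device.
        static void withDevice(Device device, Runnable work) {
            try (OptionalDeviceGuard guard = new OptionalDeviceGuard(device)) {
                // original_device() reports the device captured at construction;
                // current_device() reports the device the guard set.
                work.run();
            } // close() deallocates the guard and the original device is restored
        }
    }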
*/ public native @ByVal DeviceOptional original_device(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalSingleElementType.java index 4ebbca41839..609e2b3b6b0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalSingleElementType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalSingleElementType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalTensorRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalTensorRef.java index 461bb84509b..c8bde6fc129 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalTensorRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalTensorRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java index cfade715e0f..e570860277c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OutOfMemoryError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OutOfMemoryError.java index 4d8c16aeda6..c0db13a6fd4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OutOfMemoryError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OutOfMemoryError.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OutputArchive.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OutputArchive.java index 05af475abb9..644a126e174 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OutputArchive.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/OutputArchive.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -83,7 +85,7 @@ public native void write( /** Saves the {@code OutputArchive} into a serialized representation using the * given writer function. */ - public native void save_to(@Const @ByRef WriteFunction func); + public native void save_to(@Const @ByRef ArchiveWriter func); /** Forwards all arguments to {@code write()}. * Useful for generic code that can be re-used for both {@code OutputArchive} and diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OwnedSourceRange.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OwnedSourceRange.java deleted file mode 100644 index 2980d38c7f8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OwnedSourceRange.java +++ /dev/null @@ -1,29 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// OwnedSourceRange is just like a SourceRange except that it owns a `Source` -// instead of `Source`. Thus OwnedSourceRange owns a copy of source text. -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class OwnedSourceRange extends SourceRange { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public OwnedSourceRange(Pointer p) { super(p); } - - public OwnedSourceRange(@Const @ByRef SourceRange source_range) { super((Pointer)null); allocate(source_range); } - private native void allocate(@Const @ByRef SourceRange source_range); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PODLocalDispatchKeySet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PODLocalDispatchKeySet.java new file mode 100644 index 00000000000..8b65d4994fd --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PODLocalDispatchKeySet.java @@ -0,0 +1,54 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// POD version of LocalDispatchKeySet. Declared here just so that +// we can put it in the guards. 
+// This struct encapsulates special handling for TLS initialization +// in set_included()/included() API so that they reflect the truth. +// If you want to create PODLocalDispatchKeySet with non-zero state, +// use set_included() instead of default constructor. +@Namespace("c10::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PODLocalDispatchKeySet extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public PODLocalDispatchKeySet() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public PODLocalDispatchKeySet(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public PODLocalDispatchKeySet(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public PODLocalDispatchKeySet position(long position) { + return (PODLocalDispatchKeySet)super.position(position); + } + @Override public PODLocalDispatchKeySet getPointer(long i) { + return new PODLocalDispatchKeySet((Pointer)this).offsetAddress(i); + } + + public native @Cast("uint64_t") long included_(); public native PODLocalDispatchKeySet included_(long setter); + public native @Cast("uint64_t") long excluded_(); public native PODLocalDispatchKeySet excluded_(long setter); + + // See Note [TLS Initialization] + public native @ByVal DispatchKeySet included(); + public native @ByVal DispatchKeySet excluded(); + + public native void set_included(@ByVal DispatchKeySet x); + public native void set_excluded(@ByVal DispatchKeySet x); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLU.java deleted file mode 100644 index 338316be036..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLU.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code PReLUImpl}. - * See the documentation for {@code PReLUImpl} class to learn what methods it - * provides, and examples of how to use {@code PReLU} with {@code torch::nn::PReLUOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PReLU extends PReLUImplModuleHolder { - static { Loader.load(); } - - public PReLU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public PReLU(@SharedPtr @Cast({"", "std::shared_ptr"}) PReLUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PReLUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
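Looping back to the OutputArchive hunk above, where save_to() now takes the typed ArchiveWriter: a hypothetical minimal write-out, assuming the default OutputArchive constructor, the tensor write() overload, and the filename-based save_to() that the class also provides.

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ArchiveSketch {
        public static void main(String[] args) {
            OutputArchive archive = new OutputArchive();
            archive.write("weight", randn(2, 2));
            // The filename overload is the simple path; the ArchiveWriter
            // overload streams the serialized bytes through a callback instead.
            archive.save_to("params.pt");
        }
    }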
*/ - public PReLU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java index 8787b424b59..9ba9d92167f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class PReLUImpl extends PReLUImplCloneable { } public PReLUImpl(@Const @ByRef(nullValue = "torch::nn::PReLUOptions{}") PReLUOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::PReLUOptions{}") PReLUOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::PReLUOptions{}") PReLUOptions options_); public PReLUImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java index 8315c58eace..d0cc096b017 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class PReLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PReLUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PReLUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(PReLUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplModuleHolder.java deleted file mode 100644 index 93b4f4bec30..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PReLUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PReLUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public PReLUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public PReLUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) PReLUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PReLUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") PReLUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") PReLUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) PReLUImpl ptr(); - - /** Returns a pointer to the underlying module. 
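To make the holder-to-Impl refactor concrete, a brief sketch built from the signatures shown in the PReLUImpl and PReLUImplCloneable hunks; only randn() is assumed from elsewhere, and the sketch itself is not part of the diff.

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class CloneSketch {
        public static void main(String[] args) {
            PReLUImpl prelu = new PReLUImpl();  // the Impl is used directly now
            Tensor y = prelu.forward(randn(3));
            Module base = prelu.asModule();     // shared_ptr-preserving upcast
            Module copy = prelu.clone();        // recursive deep copy
            // clone(DeviceOptional) additionally places the copy on a device.
        }
    }

Note that the new asModule() goes through static_pointer_cast, so the returned Module shares ownership with the original shared_ptr instead of aliasing a raw static_cast pointer as before.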
*/ - public native PReLUImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUOptions.java index 91d495d45c8..a93c20d96e1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequence.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequence.java index 64bc2a3b779..91ab3bbdd58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequence.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequence.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -58,13 +60,13 @@ public class PackedSequence extends Pointer { public PackedSequence( @ByVal Tensor data, @ByVal Tensor batch_sizes, - @ByVal(nullValue = "at::Tensor{}") Tensor sorted_indices, - @ByVal(nullValue = "at::Tensor{}") Tensor unsorted_indices) { super((Pointer)null); allocate(data, batch_sizes, sorted_indices, unsorted_indices); } + @ByVal(nullValue = "torch::Tensor{}") Tensor sorted_indices, + @ByVal(nullValue = "torch::Tensor{}") Tensor unsorted_indices) { super((Pointer)null); allocate(data, batch_sizes, sorted_indices, unsorted_indices); } private native void allocate( @ByVal Tensor data, @ByVal Tensor batch_sizes, - @ByVal(nullValue = "at::Tensor{}") Tensor sorted_indices, - @ByVal(nullValue = "at::Tensor{}") Tensor unsorted_indices); + @ByVal(nullValue = "torch::Tensor{}") Tensor sorted_indices, + @ByVal(nullValue = "torch::Tensor{}") Tensor unsorted_indices); public PackedSequence( @ByVal Tensor data, @ByVal Tensor batch_sizes) { super((Pointer)null); allocate(data, batch_sizes); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PadFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PadFuncOptions.java index 93e92b16e9c..ce2f4774684 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PadFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PadFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package 
org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,6 +36,6 @@ public class PadFuncOptions extends Pointer { public PadFuncOptions(@ByVal @Cast("std::vector*") LongVector pad) { super((Pointer)null); allocate(pad); } private native void allocate(@ByVal @Cast("std::vector*") LongVector pad); public native @Cast("std::vector*") @ByRef @NoException(true) LongVector pad(); - public native @ByRef @NoException(true) pad_mode_t mode(); + public native @ByRef @NoException(true) PaddingMode mode(); public native @ByRef @NoException(true) DoublePointer value(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/pad_mode_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PaddingMode.java similarity index 54% rename from pytorch/src/gen/java/org/bytedeco/pytorch/pad_mode_t.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/PaddingMode.java index 601f3e7f213..8c50daee726 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/pad_mode_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PaddingMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,29 +18,29 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class pad_mode_t extends Pointer { +public class PaddingMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public pad_mode_t(Pointer p) { super(p); } - public pad_mode_t(kConstant value) { this(); put(value); } - public pad_mode_t(kReflect value) { this(); put(value); } - public pad_mode_t(kReplicate value) { this(); put(value); } - public pad_mode_t(kCircular value) { this(); put(value); } - public pad_mode_t() { allocate(); } + public PaddingMode(Pointer p) { super(p); } + public PaddingMode(kConstant value) { this(); put(value); } + public PaddingMode(kReflect value) { this(); put(value); } + public PaddingMode(kReplicate value) { this(); put(value); } + public PaddingMode(kCircular value) { this(); put(value); } + public PaddingMode() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef pad_mode_t put(@ByRef pad_mode_t x); + public native @Name("operator =") @ByRef PaddingMode put(@ByRef PaddingMode x); public @ByRef kConstant get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kConstant get0(@ByRef pad_mode_t container); - @ValueSetter public native pad_mode_t put(@ByRef kConstant value); + @Namespace @Name("c10::get<0>") public static native @ByRef kConstant get0(@ByRef PaddingMode container); + @ValueSetter public native PaddingMode put(@ByRef kConstant value); public @ByRef kReflect get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kReflect get1(@ByRef pad_mode_t container); - @ValueSetter public native pad_mode_t put(@ByRef kReflect value); + @Namespace @Name("c10::get<1>") public static native @ByRef kReflect get1(@ByRef PaddingMode container); + @ValueSetter public native PaddingMode put(@ByRef kReflect value); public @ByRef kReplicate get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kReplicate get2(@ByRef pad_mode_t container); - @ValueSetter public native pad_mode_t put(@ByRef kReplicate value); + @Namespace @Name("c10::get<2>") public static native @ByRef kReplicate get2(@ByRef PaddingMode container); + @ValueSetter public native PaddingMode put(@ByRef kReplicate value); public @ByRef kCircular get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kCircular get3(@ByRef pad_mode_t container); - @ValueSetter public native pad_mode_t put(@ByRef kCircular value); + @Namespace @Name("c10::get<3>") public static native @ByRef kCircular get3(@ByRef PaddingMode container); + @ValueSetter public native PaddingMode put(@ByRef kCircular value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistance.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistance.java deleted file mode 100644 index 0bf1dd474dd..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistance.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code PairwiseDistanceImpl}. 
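A short sketch of the renamed variant in use, not taken from the patch: the PadFuncOptions constructor and the mode() value setter appear in the hunks above, while the functional pad() binding in global torch is an assumption.

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class PaddingModeSketch {
        public static void main(String[] args) {
            // Pad the last dimension by one element on each side.
            PadFuncOptions options = new PadFuncOptions(new LongVector(1, 1));
            options.mode().put(new kReflect()); // pick one variant alternative
            Tensor padded = pad(randn(1, 2, 4), options); // shape becomes [1, 2, 6]
        }
    }

The rename from pad_mode_t to PaddingMode only changes the Java-side class name; the @Name annotation still binds the same underlying c10::variant type.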
- * See the documentation for {@code PairwiseDistanceImpl} class to learn what methods - * it provides, and examples of how to use {@code PairwiseDistance} with - * {@code torch::nn::PairwiseDistanceOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PairwiseDistance extends PairwiseDistanceImplModuleHolder { - static { Loader.load(); } - - public PairwiseDistance(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public PairwiseDistance(@SharedPtr @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PairwiseDistance(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java index eee6117108e..251c3d821e7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -47,9 +49,9 @@ public class PairwiseDistanceImpl extends PairwiseDistanceImplCloneable { } public PairwiseDistanceImpl(@Const @ByRef(nullValue = "torch::nn::PairwiseDistanceOptions{}") PairwiseDistanceOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::PairwiseDistanceOptions{}") PairwiseDistanceOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::PairwiseDistanceOptions{}") PairwiseDistanceOptions options_); public PairwiseDistanceImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java index d8ea5ec1f58..3b6d939e5de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class 
PairwiseDistanceImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PairwiseDistanceImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PairwiseDistanceImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(PairwiseDistanceImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplModuleHolder.java deleted file mode 100644 index 171272d355a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PairwiseDistanceImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PairwiseDistanceImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public PairwiseDistanceImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public PairwiseDistanceImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") PairwiseDistanceImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") PairwiseDistanceImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) PairwiseDistanceImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native PairwiseDistanceImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceOptions.java index e08891c3ed5..c6552c3ab71 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Param.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Param.java index 328683530d5..11e2a400757 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Param.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Param.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Param extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public Param(Pointer p) { super(p); } - public Param(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Param(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public static native @ByVal Param create( @Const @ByRef SourceRange range, @Const @ByRef Ident ident, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParamList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParamList.java new file mode 100644 index 00000000000..104b909de60 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParamList.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::List") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ParamList extends TreeView { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ParamList(Pointer p) { super(p); } + + + public ParamList(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal @Cast("torch::jit::List::iterator*") ParamListIterator begin(); + public native @ByVal @Cast("torch::jit::List::iterator*") ParamListIterator end(); + public native @Cast("bool") boolean empty(); + public native @ByVal @Name("operator []") Param get(@Cast("size_t") long i); + + public static native @ByVal ParamList create(@Const @ByRef SourceRange range, @StdVector Param subtrees); + public static native @ByVal ParamList unsafeCreate(@Const @ByRef SourceRange range, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector subtrees); + public native @Cast("size_t") long size(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParamListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParamListIterator.java new file mode 100644 index 00000000000..4592874c7aa --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParamListIterator.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::ListIterator") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ParamListIterator extends Pointer { 
+ static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ParamListIterator(Pointer p) { super(p); } + + public ParamListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it) { super((Pointer)null); allocate(it); } + private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef ParamListIterator rhs); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ParamListIterator rhs); + public native @ByVal @Name("operator *") Param multiply(); + public native @ByRef @Name("operator +=") ParamListIterator addPut(@Cast("std::ptrdiff_t") long n); + public native @ByRef @Name("operator ++") ParamListIterator increment(); + public native @ByRef @Name("operator --") ParamListIterator decrement(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDict.java deleted file mode 100644 index 21336f3af95..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDict.java +++ /dev/null @@ -1,40 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ParameterDict extends ParameterDictImplModuleHolder { - static { Loader.load(); } - - - public ParameterDict() { super((Pointer)null); allocate(); } - private native void allocate(); public ParameterDict(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ParameterDict(@SharedPtr @Cast({"", "std::shared_ptr"}) ParameterDictImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ParameterDictImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ParameterDict(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
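For the new List/ListIterator bindings above, an illustrative helper that is not in the patch: size() and the operator[] get() are shown in the ParamList hunk, while obtaining a ParamList from parsed JIT IR, and the ident().name() accessors on Param, are assumptions.

    import org.bytedeco.pytorch.*;

    public class ParamListSketch {
        // `params` would typically come from a parsed JIT function
        // definition; it is taken as given here.
        static void printParamNames(ParamList params) {
            for (long i = 0; i < params.size(); i++) {
                Param p = params.get(i);              // operator[] binding
                System.out.println(p.ident().name()); // assumed accessors
            }
        }
    }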
*/ - public ParameterDict(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ParameterDict position(long position) { - return (ParameterDict)super.position(position); - } - @Override public ParameterDict getPointer(long i) { - return new ParameterDict((Pointer)this).offsetAddress(i); - } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java index 1603a91170e..e44a0bf2a89 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,11 +35,11 @@ public class ParameterDictImpl extends ParameterDictImplCloneable { public ParameterDictImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public ParameterDictImpl( @Const @ByRef StringTensorDict params) { super((Pointer)null); allocate(params); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @Const @ByRef StringTensorDict params); /** {@code reset()} is empty for {@code ParameterDict}, since it does not have @@ -62,15 +64,15 @@ public ParameterDictImpl( public native @ByVal StringVector keys(); /** Return the Values in the dict */ - public native @StdVector Tensor values(); + public native @Cast({"", "std::vector"}) @StdMove TensorVector values(); /** Return an iterator to the start of ParameterDict */ - public native @ByVal @Cast("torch::nn::ParameterDictImpl::Iterator*") StringTensorDictItem begin(); + public native @ByVal @Cast("torch::nn::ParameterDictImpl::Iterator*") StringTensorDictItemVector.Iterator begin(); /** Return a const iterator to the start of ParameterDict */ /** Return an iterator to the end of ParameterDict */ - public native @ByVal @Cast("torch::nn::ParameterDictImpl::Iterator*") StringTensorDictItem end(); + public native @ByVal @Cast("torch::nn::ParameterDictImpl::Iterator*") StringTensorDictItemVector.Iterator end(); /** Return a const iterator to the end of ParameterDict */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java index 79ef77efc86..3527f76c67c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ParameterDictImplCloneable extends Module { 
static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ParameterDictImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ParameterDictImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ParameterDictImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplModuleHolder.java deleted file mode 100644 index 85b768fa43c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplModuleHolder.java +++ /dev/null @@ -1,89 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ParameterDictImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ParameterDictImplModuleHolder(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ParameterDictImplModuleHolder(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ParameterDictImplModuleHolder position(long position) { - return (ParameterDictImplModuleHolder)super.position(position); - } - @Override public ParameterDictImplModuleHolder getPointer(long i) { - return new ParameterDictImplModuleHolder((Pointer)this).offsetAddress(i); - } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - public ParameterDictImplModuleHolder() { super((Pointer)null); allocate(); } - private native void allocate(); - - /** Constructs the {@code ModuleHolder} with an empty contained value. 
Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ParameterDictImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ParameterDictImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ParameterDictImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ParameterDictImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ParameterDictImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ParameterDictImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ParameterDictImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ParameterDictImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterList.java deleted file mode 100644 index 1ac4c697983..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterList.java +++ /dev/null @@ -1,39 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ParameterList extends ParameterListImplModuleHolder { - static { Loader.load(); } - - - public ParameterList() { super((Pointer)null); allocate(); } - private native void allocate(); public ParameterList(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ParameterList(@SharedPtr @Cast({"", "std::shared_ptr"}) ParameterListImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ParameterListImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ParameterList(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ParameterList(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ParameterList position(long position) { - return (ParameterList)super.position(position); - } - @Override public ParameterList getPointer(long i) { - return new ParameterList((Pointer)this).offsetAddress(i); - } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java index 319550167ad..fd2f1b48a0b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,7 +34,7 @@ public class ParameterListImpl extends ParameterListImplCloneable { public ParameterListImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); /** Constructs the {@code ParameterList} from a variadic list of ParameterList. 
*/ @@ -51,14 +53,14 @@ public class ParameterListImpl extends ParameterListImplCloneable { /** push the a given parameter at the end of the list * And the key of the pair will be discarded, only the value * will be added into the {@code ParameterList} */ - public native void append(@Cast("const torch::OrderedDict::Item*") @ByRef StringTensorPair pair); + public native void append(@Const @ByRef StringTensorDictItem pair); /** extend parameters from a container to the end of the list */ /** Returns an iterator to the start of the ParameterList * the iterator returned will be type of {@code OrderedDict::Item} */ - public native @ByVal @Cast("torch::nn::ParameterListImpl::Iterator*") StringTensorPair begin(); + public native @ByVal @Cast("torch::nn::ParameterListImpl::Iterator*") StringTensorDictItemVector.Iterator begin(); /** Returns a const iterator to the start of the ParameterList * the iterator returned will be type of {@code OrderedDict::Item} */ - public native @ByVal @Cast("torch::nn::ParameterListImpl::Iterator*") StringTensorPair end(); + public native @ByVal @Cast("torch::nn::ParameterListImpl::Iterator*") StringTensorDictItemVector.Iterator end(); /** Returns a const iterator to the end of the ParameterList * the iterator returned will be type of {@code OrderedDict>") Module asModule(@SharedPtr ParameterListImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ParameterListImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplModuleHolder.java deleted file mode 100644 index 41e3413cf6e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplModuleHolder.java +++ /dev/null @@ -1,89 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ParameterListImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ParameterListImplModuleHolder(Pointer p) { super(p); } - /** Native array allocator. 
Access with {@link Pointer#position(long)}. */ - public ParameterListImplModuleHolder(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public ParameterListImplModuleHolder position(long position) { - return (ParameterListImplModuleHolder)super.position(position); - } - @Override public ParameterListImplModuleHolder getPointer(long i) { - return new ParameterListImplModuleHolder((Pointer)this).offsetAddress(i); - } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - public ParameterListImplModuleHolder() { super((Pointer)null); allocate(); } - private native void allocate(); - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ParameterListImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ParameterListImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ParameterListImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ParameterListImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ParameterListImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ParameterListImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ParameterListImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ParameterListImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
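// Illustrative sketch only: with the ModuleHolder wrappers (ParameterList,
// ParameterDict) deleted by this patch, user code works with the Impl classes
// directly, and the @SharedPtr allocate() changes above mean they are created as
// shared_ptr-owned modules. append(Tensor) is an assumption taken from the C++
// torch::nn::ParameterListImpl API; only the StringTensorDictItem overload of
// append appears in this hunk.

    // assumes: import org.bytedeco.pytorch.*; import static org.bytedeco.pytorch.global.torch.*;
    ParameterListImpl plist = new ParameterListImpl();
    plist.append(randn(3, 3));            // register a fresh parameter (assumed overload)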
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterPolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterPolicy.java index 6ac8899a03b..925896d7de5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterPolicy.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterPolicy.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,7 +39,7 @@ public class ParameterPolicy extends Pointer { public static native @ByVal @Cast("torch::jit::detail::ParameterPolicy::value_type*") Tensor create( @StdVector SlotCursor cursors, @ByVal IValue v); - public static native @Cast("bool") boolean valid(@Const @SharedPtr @ByRef ClassType typ, @Cast("size_t") long i, @Const @ByRef IValue v); + public static native @Cast("bool") boolean valid(@Const @SharedPtr("c10::ClassType") @ByRef ClassType typ, @Cast("size_t") long i, @Const @ByRef IValue v); @MemberGetter public static native @Cast("const bool") boolean all_slots(); public static final boolean all_slots = all_slots(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Pass.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Pass.java index b332b610226..0fa40c8d6c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Pass.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Pass.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,8 +21,10 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Pass extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public Pass(Pointer p) { super(p); } - public Pass(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Pass(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public static native @ByVal Pass create(@Const @ByRef SourceRange range); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java index 7f552bf9b97..26259b9828d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,34 +24,34 @@ public class Pickler extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Pickler(Pointer p) { super(p); } - public Pickler(@ByVal Writer writer) { super((Pointer)null); allocate(writer); } - private native void allocate(@ByVal Writer writer); + public Pickler(@ByVal PickleWriter writer) { super((Pointer)null); allocate(writer); } + private native void allocate(@ByVal PickleWriter writer); // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) public Pickler( - @ByVal Writer writer, + @ByVal PickleWriter writer, TensorVector tensor_table, @ByVal TypeRenamer type_renamer, - ClassTypeVector memoized_class_types, + SharedClassTypeVector memoized_class_types, @ByVal(nullValue = "std::function(nullptr)") TensorIdGetter get_tensor_id, @Cast("bool") boolean tag_aggregates/*=true*/) { super((Pointer)null); allocate(writer, tensor_table, type_renamer, memoized_class_types, get_tensor_id, tag_aggregates); } private native void allocate( - @ByVal Writer writer, + @ByVal PickleWriter writer, TensorVector tensor_table, @ByVal TypeRenamer type_renamer, - ClassTypeVector memoized_class_types, + SharedClassTypeVector memoized_class_types, @ByVal(nullValue = "std::function(nullptr)") TensorIdGetter get_tensor_id, @Cast("bool") boolean tag_aggregates/*=true*/); public Pickler( - @ByVal Writer writer, + @ByVal PickleWriter writer, TensorVector tensor_table, @ByVal TypeRenamer type_renamer, - ClassTypeVector memoized_class_types) { super((Pointer)null); allocate(writer, tensor_table, type_renamer, memoized_class_types); } + SharedClassTypeVector memoized_class_types) { super((Pointer)null); allocate(writer, tensor_table, type_renamer, memoized_class_types); } private native void allocate( - @ByVal Writer writer, + @ByVal PickleWriter writer, TensorVector tensor_table, @ByVal TypeRenamer type_renamer, - ClassTypeVector memoized_class_types); + SharedClassTypeVector memoized_class_types); // NOLINTNEXTLINE(bugprone-exception-escape) // Push protocol onto the stack @@ -63,7 +65,7 @@ private native void allocate( public native void startTuple(); public native void endTuple(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensorData(); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensorData(); public native 
void pushDict(@Const @ByRef IValue ivalue); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffle.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffle.java deleted file mode 100644 index ffb5b75fe68..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffle.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code PixelShuffleImpl}. - * See the documentation for {@code PixelShuffleImpl} class to learn what methods it - * provides, and examples of how to use {@code PixelShuffle} with - * {@code torch::nn::PixelShuffleOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PixelShuffle extends PixelShuffleImplModuleHolder { - static { Loader.load(); } - - public PixelShuffle(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public PixelShuffle(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelShuffleImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelShuffleImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PixelShuffle(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java index 4a70bba6e42..b806a66051f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,7 +41,7 @@ public class PixelShuffleImpl extends PixelShuffleImplCloneable { public PixelShuffleImpl(Pointer p) { super(p); } public PixelShuffleImpl(@Const @ByRef PixelShuffleOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef PixelShuffleOptions options_); + @SharedPtr private native void allocate(@Const @ByRef PixelShuffleOptions options_); /** Pretty prints the {@code PixelShuffle} module into the given {@code stream}. 
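// Illustrative sketch only, for the refactored PixelShuffleImpl above. The options
// value and the forward() signature are assumptions based on the C++
// torch::nn::PixelShuffle API (neither appears in this hunk); shapes follow
// (N, C*r^2, H, W) -> (N, C, H*r, W*r).

    // assumes: import org.bytedeco.pytorch.*; import static org.bytedeco.pytorch.global.torch.*;
    PixelShuffleImpl shuffle = new PixelShuffleImpl(new PixelShuffleOptions(2));
    Tensor in  = randn(1, 4, 3, 3);       // r = 2, so C*r^2 = 4
    Tensor out = shuffle.forward(in);     // -> shape (1, 1, 6, 6)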
*/ public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java index f1f18feb52c..23ab59020c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class PixelShuffleImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PixelShuffleImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PixelShuffleImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(PixelShuffleImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplModuleHolder.java deleted file mode 100644 index 1c8a0d649e4..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PixelShuffleImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public PixelShuffleImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public PixelShuffleImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public PixelShuffleImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelShuffleImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelShuffleImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") PixelShuffleImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") PixelShuffleImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) PixelShuffleImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native PixelShuffleImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleOptions.java index 2857211ec78..4f696119238 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffle.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffle.java deleted file mode 100644 index 661be331a67..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffle.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code PixelUnshuffleImpl}. - * See the documentation for {@code PixelUnshuffleImpl} class to learn what methods - * it provides, and examples of how to use {@code PixelUnshuffle} with - * {@code torch::nn::PixelUnshuffleOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PixelUnshuffle extends PixelUnshuffleImplModuleHolder { - static { Loader.load(); } - - public PixelUnshuffle(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public PixelUnshuffle(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public PixelUnshuffle(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java index 7476b234928..7e041525ee9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -38,7 +40,7 @@ public class PixelUnshuffleImpl extends PixelUnshuffleImplCloneable { public PixelUnshuffleImpl(Pointer p) { super(p); } public PixelUnshuffleImpl(@Const @ByRef PixelUnshuffleOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef PixelUnshuffleOptions options_); + @SharedPtr private native void allocate(@Const @ByRef PixelUnshuffleOptions options_); /** Pretty prints the {@code PixelUnshuffle} module into the given {@code stream}. */ public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java index dfe04d9ee71..b68bb2958e8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class PixelUnshuffleImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PixelUnshuffleImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PixelUnshuffleImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(PixelUnshuffleImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
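// Illustrative sketch only: the clone() bindings in these Cloneable hunks now return
// the shared_ptr by value (@SharedPtr + @ByVal). Deep-copying onto a device could
// look like the following; the DeviceOptional(Device) and Device(String) constructors
// are assumptions based on the presets' usual c10 mappings, and `net` stands for any
// of the *ImplCloneable modules in this patch.

    Module copy  = net.clone();           // recursive deep copy; parameters not shared
    Module onCpu = net.clone(new DeviceOptional(new Device("cpu")));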
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplModuleHolder.java deleted file mode 100644 index 4f747bfbdf5..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PixelUnshuffleImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PixelUnshuffleImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public PixelUnshuffleImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public PixelUnshuffleImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") PixelUnshuffleImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") PixelUnshuffleImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) PixelUnshuffleImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native PixelUnshuffleImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleOptions.java index 564047fef6c..38de4d6904c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java index 08b81bffefd..bbf5459a7e3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDeleteContext.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDtor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDtor.java index 5cba7e55a78..f85f2cffd25 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDtor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PlacementDtor.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLoss.java deleted file mode 100644 index 7ad798e3528..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import 
org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code PoissonNLLLossImpl}. - * See the documentation for {@code PoissonNLLLossImpl} class to learn what methods - * it provides, and examples of how to use {@code PoissonNLLLoss} with - * {@code torch::nn::PoissonNLLLossOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PoissonNLLLoss extends PoissonNLLLossImplModuleHolder { - static { Loader.load(); } - - public PoissonNLLLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public PoissonNLLLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PoissonNLLLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java index 4c77fa6f31a..da3ae69f8bf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -47,9 +49,9 @@ public class PoissonNLLLossImpl extends PoissonNLLLossImplCloneable { } public PoissonNLLLossImpl(@ByVal(nullValue = "torch::nn::PoissonNLLLossOptions{}") PoissonNLLLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::PoissonNLLLossOptions{}") PoissonNLLLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::PoissonNLLLossOptions{}") PoissonNLLLossOptions options_); public PoissonNLLLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java index d543076d8f3..0f11bd11054 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; 
import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class PoissonNLLLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PoissonNLLLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr PoissonNLLLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(PoissonNLLLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplModuleHolder.java deleted file mode 100644 index 0a6a23bc303..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PoissonNLLLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PoissonNLLLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. 
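// Illustrative sketch only: the asModule() pair added in these hunks replaces the
// old static_cast binding with a shared_ptr-preserving static_pointer_cast, so an
// Impl can be passed where a generic Module is expected without losing ownership.
// Both calls below appear verbatim in the declarations above.

    PoissonNLLLossImpl loss = new PoissonNLLLossImpl();
    Module generic = loss.asModule();     // upcast that keeps shared ownership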
*/ - /* implicit */ public PoissonNLLLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public PoissonNLLLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") PoissonNLLLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") PoissonNLLLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) PoissonNLLLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native PoissonNLLLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossOptions.java index 26b0b03f12b..fbdbcfc5307 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,5 +48,5 @@ public class PoissonNLLLossOptions extends Pointer { public native @Cast("bool*") @ByRef @NoException(true) BoolPointer log_input(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer full(); public native @ByRef @NoException(true) DoublePointer eps(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PrintDepsTable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PrintDepsTable.java deleted file mode 100644 index ca1f632ce0a..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PrintDepsTable.java +++ /dev/null @@ -1,42 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PrintDepsTable extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public PrintDepsTable() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public PrintDepsTable(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
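// Illustrative sketch only: PoissonNLLLossOptions.reduction() above now returns the
// renamed LossReduction type (previously loss_reduction_t). The no-arg constructor
// is assumed from the C++ default "torch::nn::PoissonNLLLossOptions{}" used in the
// Impl hunk; put() on BoolPointer/DoublePointer is the standard JavaCPP setter.

    PoissonNLLLossOptions opts = new PoissonNLLLossOptions();
    opts.log_input().put(false);          // expect raw (non-log) input
    opts.eps().put(1e-6);                 // numerical floor to avoid log(0)
    LossReduction reduction = opts.reduction();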
*/ - public PrintDepsTable(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public PrintDepsTable position(long position) { - return (PrintDepsTable)super.position(position); - } - @Override public PrintDepsTable getPointer(long i) { - return new PrintDepsTable((Pointer)this).offsetAddress(i); - } - - - - public native @Cast("size_t") long size(); - - public native @Const @SharedPtr @ByRef @Name("operator []") NamedType get(@Cast("size_t") long index); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PrintValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PrintValue.java index 5ce6bc3dae8..5e262aa0963 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PrintValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PrintValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,10 +37,5 @@ public class PrintValue extends SugaredValue { } public native @StdString BytePointer kind(); - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ProfileIValueOp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ProfileIValueOp.java index 63774625e87..65c4e4114e8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ProfileIValueOp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ProfileIValueOp.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -25,15 +27,15 @@ public class ProfileIValueOp extends JitNode { @MemberGetter public static native @Const @ByRef Symbol Kind(); public ProfileIValueOp( Graph graph, - @ByVal IValueCallback callback) { super((Pointer)null); allocate(graph, callback); } + @ByVal IValueVectorConsumer callback) { super((Pointer)null); allocate(graph, callback); } private native void allocate( Graph graph, - @ByVal IValueCallback callback); + @ByVal IValueVectorConsumer callback); public native void cloneFrom(JitNode other_); public native JitNode allocNewInstance(Graph g); public native @ByVal @Cast("std::function&)>*") Pointer getCallback(); - public native void setCallback(@ByVal IValueCallback callback); + public native void setCallback(@ByVal IValueVectorConsumer callback); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ProfilerConfig.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ProfilerConfig.java new file mode 100644 index 00000000000..656b1ab54db --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ProfilerConfig.java @@ -0,0 +1,82 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS 
FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("torch::profiler::impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ProfilerConfig extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ProfilerConfig(Pointer p) { super(p); } + + public ProfilerConfig( + ProfilerState state, + @Cast("bool") boolean report_input_shapes/*=false*/, + @Cast("bool") boolean profile_memory/*=false*/, + @Cast("bool") boolean with_stack/*=false*/, + @Cast("bool") boolean with_flops/*=false*/, + @Cast("bool") boolean with_modules/*=false*/, + @ByVal(nullValue = "torch::profiler::impl::ExperimentalConfig()") ExperimentalConfig experimental_config) { super((Pointer)null); allocate(state, report_input_shapes, profile_memory, with_stack, with_flops, with_modules, experimental_config); } + private native void allocate( + ProfilerState state, + @Cast("bool") boolean report_input_shapes/*=false*/, + @Cast("bool") boolean profile_memory/*=false*/, + @Cast("bool") boolean with_stack/*=false*/, + @Cast("bool") boolean with_flops/*=false*/, + @Cast("bool") boolean with_modules/*=false*/, + @ByVal(nullValue = "torch::profiler::impl::ExperimentalConfig()") ExperimentalConfig experimental_config); + public ProfilerConfig( + ProfilerState state) { super((Pointer)null); allocate(state); } + private native void allocate( + ProfilerState state); + public ProfilerConfig( + @Cast("torch::profiler::impl::ProfilerState") int state, + @Cast("bool") boolean report_input_shapes/*=false*/, + @Cast("bool") boolean profile_memory/*=false*/, + @Cast("bool") boolean with_stack/*=false*/, + @Cast("bool") boolean with_flops/*=false*/, + @Cast("bool") boolean with_modules/*=false*/, + @ByVal(nullValue = "torch::profiler::impl::ExperimentalConfig()") ExperimentalConfig experimental_config) { super((Pointer)null); allocate(state, report_input_shapes, profile_memory, with_stack, with_flops, with_modules, experimental_config); } + private native void allocate( + @Cast("torch::profiler::impl::ProfilerState") int state, + @Cast("bool") boolean report_input_shapes/*=false*/, + @Cast("bool") boolean profile_memory/*=false*/, + @Cast("bool") boolean with_stack/*=false*/, + @Cast("bool") boolean with_flops/*=false*/, + @Cast("bool") boolean with_modules/*=false*/, + @ByVal(nullValue = "torch::profiler::impl::ExperimentalConfig()") ExperimentalConfig experimental_config); + public ProfilerConfig( + @Cast("torch::profiler::impl::ProfilerState") int state) { super((Pointer)null); allocate(state); } + private native void allocate( + @Cast("torch::profiler::impl::ProfilerState") int state); + + public native @Cast("bool") boolean disabled(); + public native @Cast("bool") boolean global(); + + public native ProfilerState state(); public native ProfilerConfig state(ProfilerState setter); + public native @ByRef ExperimentalConfig experimental_config(); public native ProfilerConfig 
experimental_config(ExperimentalConfig setter); + public native @Cast("bool") boolean report_input_shapes(); public native ProfilerConfig report_input_shapes(boolean setter); + public native @Cast("bool") boolean profile_memory(); public native ProfilerConfig profile_memory(boolean setter); + public native @Cast("bool") boolean with_stack(); public native ProfilerConfig with_stack(boolean setter); + public native @Cast("bool") boolean with_flops(); public native ProfilerConfig with_flops(boolean setter); + public native @Cast("bool") boolean with_modules(); public native ProfilerConfig with_modules(boolean setter); + + // For serialization + public native @ByVal IValue toIValue(); + public static native @ByVal ProfilerConfig fromIValue(@Const @ByRef IValue profilerConfigIValue); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Property.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Property.java index f6d6ed34e14..152a2f0f682 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Property.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Property.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -21,9 +23,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Property extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Property(Pointer p) { super(p); } - public Property(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Property(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Ident name(); public native @ByVal Def getter(); public native @ByVal DefMaybe setter(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyList.java new file mode 100644 index 00000000000..6bb115a0a4a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyList.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::List") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PropertyList extends TreeView { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public PropertyList(Pointer p) { super(p); } + + + public PropertyList(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal @Cast("torch::jit::List::iterator*") PropertyListIterator begin(); + public native @ByVal @Cast("torch::jit::List::iterator*") PropertyListIterator end(); + public native @Cast("bool") boolean empty(); + public native @ByVal @Name("operator []") Property get(@Cast("size_t") long i); + + public static native @ByVal PropertyList create(@Const @ByRef SourceRange range, @Const @ByRef PropertyVector subtrees); + public static native @ByVal PropertyList unsafeCreate(@Const @ByRef SourceRange range, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector subtrees); + public native @Cast("size_t") long size(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListIterator.java new file mode 100644 index 00000000000..c3f56f5b442 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListIterator.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::ListIterator") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PropertyListIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public PropertyListIterator(Pointer p) { super(p); } + + public PropertyListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it) { super((Pointer)null); allocate(it); } + private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef PropertyListIterator rhs); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef PropertyListIterator rhs); + public native @ByVal @Name("operator *") Property multiply(); + public native @ByRef @Name("operator +=") PropertyListIterator addPut(@Cast("std::ptrdiff_t") long n); + public native @ByRef @Name("operator ++") PropertyListIterator increment(); + public native @ByRef @Name("operator --") PropertyListIterator decrement(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListMaybe.java new file mode 100644 index 00000000000..bcbc3ea97c9 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyListMaybe.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::Maybe >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PropertyListMaybe extends TreeView { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public PropertyListMaybe(Pointer p) { super(p); } + + public PropertyListMaybe(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + /* implicit */ public PropertyListMaybe(@Const @ByRef PropertyList tree) { super((Pointer)null); allocate(tree); } +private native void allocate(@Const @ByRef PropertyList tree); + public native @Cast("bool") boolean present(); + public native @ByVal PropertyList get(); + + public static native @ByVal PropertyListMaybe create(@Const @ByRef SourceRange range); + public static native @ByVal PropertyListMaybe create(@Const @ByRef SourceRange range, @Const @ByRef PropertyList value); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyPropBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyPropBase.java deleted file mode 100644 index 6685d2ac321..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyPropBase.java +++ /dev/null @@ -1,31 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PropertyPropBase extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/
- public PropertyPropBase(Pointer p) { super(p); }
- - - - // insert_expands is used for shape inference
- - - -}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyVector.java index dff5421ed27..2d48bd1db76 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PropertyVector.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.pytorch;
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -27,6 +29,8 @@ public class PropertyVector extends Pointer {
public boolean empty() { return size() == 0; }
public native long size();
+ public Property front() { return get(0); }
+ public Property back() { return get(size() - 1); }
@Index(function = "at") public native @ByRef Property get(@Cast("size_t") long i);
public native @ByVal Iterator begin();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreter.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreter.java new file mode 100644 index 00000000000..b2afb21c876
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreter.java
@@ -0,0 +1,44 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("c10::impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class PyInterpreter extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public PyInterpreter(Pointer p) { super(p); }
+
+ public native @Const PyInterpreterVTable vtable_(); public native PyInterpreter vtable_(PyInterpreterVTable setter);
+
+ public PyInterpreter(@Const PyInterpreterVTable vtable) { super((Pointer)null); allocate(vtable); }
+ private native void allocate(@Const PyInterpreterVTable vtable);
+
+ public native @Const @ByRef @Name("operator *") @NoException(true) PyInterpreterVTable multiply();
+ public native @Const @Name("operator ->") @NoException(true) PyInterpreterVTable access();
+
+ // Disarm this PyInterpreter, making all of its methods noops.
+ // The vtable pointer is not an atomic at the moment, which means
+ // a disarm() invocation that is concurrent with active destructors
+ // is not thread safe and will trigger TSAN. My hope is that this
+ // situation doesn't ever actually happen; tensor destruction should
+ // quiesce when a dlclose happens, and any long lived tensors whose
+ // destructors would be disarmed here only begin the destruction process
+ // on process shutdown (long after the dlclose has occurred).
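As an aside on this hunk: the disarm() declaration just below is the Java side of the no-op-vtable trick described in the comment above. A rough, illustrative-only sketch of how these bindings fit together (these are internal c10::impl APIs; the interpreter instance is assumed to come from native code, for example via the PythonDispatcherTLS class bound later in this patch):

    import org.bytedeco.pytorch.PyInterpreter;
    import org.bytedeco.pytorch.PyInterpreterVTable;

    static void inspectAndDisarm(PyInterpreter interp) {
        if (interp == null || interp.isNull()) return;  // no interpreter registered
        PyInterpreterVTable vt = interp.access();       // operator->: the vtable indirection
        System.out.println("interpreter: " + vt.name().getString());
        interp.disarm();                                // from here on, vtable methods are no-ops
    }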
+ public native @NoException(true) void disarm();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java new file mode 100644 index 00000000000..949391fc2ee
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java
@@ -0,0 +1,176 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+// Note [Python interpreter tag]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// Traditionally, PyTorch is layered such that our Python library
+// (libtorch_python) references our pure C++ library (libtorch) as the
+// natural order of things. However, sometimes this natural order is
+// subverted: C++ objects refer to Python objects (for example, we
+// store a PyObject* pointer on TensorImpl so that converting from a
+// C++ Tensor to a Python Tensor is just a memory dereference).
+//
+// These unusual orderings must be treated with care. To start, you need to
+// virtualize the destructor so that the PyObject can be decref'ed on
+// destruction (because the C++ object itself doesn't know anything about
+// Python--remember, layering!). This process itself is fraught, since
+// acquiring the GIL could lead to deadlocks if someone is blocking on you
+// while holding the GIL. Furthermore, if the C++ objects outlive the
+// interpreter (which can happen if you stash them in a static global
+// variable defined in libtorch), you may attempt to decref the object when
+// the Python interpreter has already been shut down.
+//
+// BUT WAIT, IT GETS WORSE. With torchdeploy, there may be multiple Python
+// interpreters in a single process. If a C++ object is accessible from
+// multiple interpreters, we must take care not to accidentally pass a
+// PyObject from one interpreter to another interpreter.
+//
+// To prevent these mixups, we introduce a PyInterpreter "tag" (object with
+// a vtable), which specifies a specific Python interpreter.
+//
+// - Any given object can be associated with AT MOST one Python interpreter.
+// We represent the interpreter tag as a memory address to an instance of
+// a virtual class that is allocated once per interpreter (this is so that
+// we can request the interpreter to perform operations for us, if
+// necessary).
+//
+// - It can be recorded with a PyObject (PyInterpreterObject) so that
+// we know what interpreter the object is associated with, and we can
+// raise an error if you try to use the PyObject from the wrong
+// interpreter context.
+//
+// - It contains a vtable that can be used to perform various Python
+// operations from ordinary C++ code that ordinarily wouldn't be accessible
+// from libtorch.
+//
+// A simple use case is when a C++ object must be associated with a PyObject.
+// However, for TensorImpl, we lazily allocate a PyObject the first time the
+// object passes into Python.
The invariants for this situation are more
+// subtle:
+//
+// - A given TensorImpl's interpreter tag can only go from uninitialized to
+// tagged; once tagged, this is a quiescent state (once tagged to an
+// interpreter, ALWAYS tagged to that interpreter)
+//
+// - A thread may mutate the PyObject field of a TensorImpl if and only if it
+// holds the GIL for the interpreter tagged on the TensorImpl. (If the
+// TensorImpl is not tagged, it must first atomically claim its tag before it
+// can validly write)
+//
+// WARNING: This class has to be written very carefully, because it may be
+// possible for a Tensor to have a reference to an interpreter corresponding to
+// a shared library that has ALREADY BEEN UNLOADED. This makes blindly calling
+// virtual methods very dangerous, because the vtable may be garbage at that
+// point (on a good day, you might get "pure virtual method called").
+//
+// The idea to solve this problem is that we always leak PyInterpreters (so they
+// always stay live even after dlclose), and make sure we can disarm their
+// virtual methods by indirecting through a separate PyInterpreterVTable
+// object. This can be replaced with a no-op vtable from libc10.so, which
+// is guaranteed to stick around until the bitter end.
+//
+// NB: The downside with representing PyInterpreter tags as full objects is that
+// it takes an extra word on TensorImpl. If tags were instead just integer
+// indices, on 64-bit architectures we could pack the tag and PyObject together
+// into a single atomic word. On 32-bit architectures we could simply say that
+// only one Python interpreter is supported (erroring if a nontrivial
+// interpreter tag is attempted to be set).
+//
+// The difficulty with this scheme is we need to maintain an out-of-line table
+// to get at the PyInterpreters so that we can do virtual method calls on them,
+// and registration/deregistration to this table must be done in a thread-safe
+// manner. This can be easily done if the number of possible PyInterpreters is
+// small enough (e.g., 8-bit integer) by simply preallocating an array of
+// sufficient size to hold all possible interpreters. Surely 128 threads is
+// more than enough for anyone!
+//
+// I decided against this technique for the moment, because the extra word
+// added by the PyInterpreter tag takes us to 24 words, which means that we
+// still fit inside three eight-word cache lines. If you need to penny-pinch
+// another word, consider doing this!
+
+@Namespace("c10::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class PyInterpreterVTable extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public PyInterpreterVTable(Pointer p) { super(p); }
+
+
+ // Report the name of this interpreter
+ public native @StdString BytePointer name();
+
+ // Run Py_DECREF on a PyObject.
We DO NOT assume the GIL is held on call + // See NOTE [PyInterpreter::decref takes an `is_tensor` arg] + public native void decref(@Cast("PyObject*") Pointer pyobj, @Cast("bool") boolean is_tensor); + + // Perform a detach by deferring to the __torch_dispatch__ implementation of + // detach, which will also arrange for the PyObject to get copied in this + // situation + public native @ByVal TensorImplPtr detach( + @Const TensorImpl self); + + // Invoke the Python boxed fallback dispatch to go back into Python + public native void dispatch(@Const @ByRef OperatorHandle op, IValueVector stack); + + // This is only invoked in the multipy/torchdeploy situation from + // pythonOpRegistrationTrampoline; this lets us get to the Python + // interpreter to actually find the appropriate Python op registration + // entry to call. + public native void python_op_registration_trampoline( + @Const @ByRef OperatorHandle op, + DispatchKey arg1, + IValueVector stack); + public native void python_op_registration_trampoline( + @Const @ByRef OperatorHandle op, + @Cast("c10::DispatchKey") short arg1, + IValueVector stack); + + // Invoke the Python dispatcher to handle this call + public native void python_dispatcher( + @Const @ByRef OperatorHandle op, + @ByVal DispatchKeySet arg1, + IValueVector stack); + + public native @Cast("bool") boolean is_contiguous(@Const TensorImpl self, @ByVal MemoryFormat arg1); + public native @Cast("bool") boolean is_strides_like(@Const TensorImpl self, @ByVal MemoryFormat arg1); + public native @Cast("bool") boolean is_non_overlapping_and_dense(@Const TensorImpl self); + public native @ByVal Device device(@Const TensorImpl self); + public native @Cast("int64_t") long dim(@Const TensorImpl self); + public native @ByVal LongArrayRef strides(@Const TensorImpl self); + public native @ByVal LongArrayRef sizes(@Const TensorImpl self); + public native @ByVal SymIntArrayRef sym_sizes(@Const TensorImpl self); + public native Layout layout(@Const TensorImpl self); + public native @ByVal SymInt sym_numel(@Const TensorImpl self); + public native @ByVal SymIntArrayRef sym_strides(@Const TensorImpl self); + public native @ByVal SymInt sym_storage_offset(@Const TensorImpl self); + + public native void trace_gpu_event_creation(@Cast("uintptr_t") long event); + public native void trace_gpu_event_deletion(@Cast("uintptr_t") long event); + public native void trace_gpu_event_record(@Cast("uintptr_t") long event, @Cast("uintptr_t") long stream); + public native void trace_gpu_event_wait(@Cast("uintptr_t") long event, @Cast("uintptr_t") long stream); + public native void trace_gpu_memory_allocation(@Cast("uintptr_t") long ptr); + public native void trace_gpu_memory_deallocation(@Cast("uintptr_t") long ptr); + public native void trace_gpu_stream_creation(@Cast("uintptr_t") long stream); + public native void trace_gpu_device_synchronization(); + public native void trace_gpu_stream_synchronization(@Cast("uintptr_t") long stream); + public native void trace_gpu_event_synchronization(@Cast("uintptr_t") long event); + + public native void reset_backward_hooks(@Const TensorImpl self); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java index 973ee335827..f0a172030c5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.pytorch;
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -15,10 +17,20 @@ import static org.bytedeco.pytorch.global.torch.*;
-@Namespace("c10::ivalue") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+
+// virtual ivalue PyObjectHolder that holds a py::object; we make this virtual
+// because the py::object and refcounting logic should happen in libtorch_python;
+// see the concrete implementation in python_ivalue.h
+@Name("c10::ivalue::PyObjectHolder") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class PyObjectHolder extends Pointer {
- /** Empty constructor. Calls {@code super((Pointer)null)}. */
- public PyObjectHolder() { super((Pointer)null); }
+ static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public PyObjectHolder(Pointer p) { super(p); }
+
+ public native @Cast("PyObject*") Pointer getPyObject();
+ public native @ByVal InferredType tryToInferType();
+ public native @ByVal IValue toIValue(@Const @ByRef Type.TypePtr type, @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N);
+ public native @ByVal IValue toIValue(@Const @ByRef Type.TypePtr type);
+ public native @StdString BytePointer toStr();
+ public native @Cast({"", "std::vector"}) @StdMove TensorVector extractTensors();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolderPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolderPtr.java new file mode 100644 index 00000000000..01830de5e76
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolderPtr.java
@@ -0,0 +1,150 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class PyObjectHolderPtr extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public PyObjectHolderPtr(Pointer p) { super(p); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}.
*/ + public PyObjectHolderPtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public PyObjectHolderPtr position(long position) { + return (PyObjectHolderPtr)super.position(position); + } + @Override public PyObjectHolderPtr getPointer(long i) { + return new PyObjectHolderPtr((Pointer)this).offsetAddress(i); + } + + + public PyObjectHolderPtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public PyObjectHolderPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. + // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public PyObjectHolderPtr(PyObjectHolder target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(PyObjectHolder target, @ByVal DontIncreaseRefcount arg1); + + + + public PyObjectHolderPtr(@ByRef(true) PyObjectHolderPtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) PyObjectHolderPtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) PyObjectHolderPtr put(@ByRef(true) PyObjectHolderPtr rhs); + + public native @NoException(true) PyObjectHolder get(); + + public native @ByRef @Name("operator *") @NoException(true) PyObjectHolder multiply(); + + public native @Name("operator ->") @NoException(true) PyObjectHolder access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef PyObjectHolderPtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into a intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) PyObjectHolder release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal PyObjectHolderPtr reclaim(PyObjectHolder owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal PyObjectHolderPtr reclaim_copy(PyObjectHolder owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside a intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. 
+ */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal PyObjectHolderPtr unsafe_steal_from_new(PyObjectHolder raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal PyObjectHolderPtr unsafe_adapt_non_heap_allocated( + PyObjectHolder raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal PyObjectHolderPtr unsafe_reclaim_from_nonowning(PyObjectHolder raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectType.java index fe2b38fe8a0..98dbb9a0d3f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectTypePtr.java index e21b086bf10..15e1da9be0f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java new file mode 100644 index 00000000000..a601a940b5a
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyTorchStreamReader.java
@@ -0,0 +1,115 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+// PyTorch containers are a special zip archive with the following layout
+// archive_name.zip contains:
+// archive_name/
+// version # a file with a single decimal number written in ascii,
+// # used to establish the version of the archive format
+// model.json # overall model description, this is a json output of
+// # ModelDef from torch.proto
+// # the following names are by convention only, model.json will
+// # refer to these files by full names
+// tensors/
+// 0 # flat storage for tensor data, meta-data about shapes, etc. is
+// # in model.json
+// 1
+// ...
+// # code entries will only exist for modules that have methods attached
+// code/
+// archive_name.py # serialized torch script code (python syntax, using
+// PythonPrint) archive_name_my_submodule.py # submodules have separate
+// files
+//
+// The PyTorchStreamWriter also ensures additional useful properties for these
+// files
+// 1. All files are stored uncompressed.
+// 2. All files in the archive are aligned to 64 byte boundaries such that
+// it is possible to mmap the entire file and get an aligned pointer to
+// tensor data.
+// 3. We universally write in ZIP64 format for consistency.
+
+// The PyTorchStreamReader also provides additional properties:
+// 1. It can read zip files that are created with common
+// zip tools. This means that even though our writer doesn't compress files,
+// the reader can still read files that were compressed.
+// 2. It provides a getRecordOffset function which returns the offset into the
+// raw file where file data lives. If the file was written with
+// PyTorchStreamWriter it is guaranteed to be 64 byte aligned.

+// PyTorchReader/Writer handle checking the version number on the archive format
+// and ensure that all files are written to an archive_name directory so they
+// unzip cleanly.

+// When developing this format we want to pay particular attention to the
+// following use cases:
+//
+// -- Reading --
+// 1) Reading with full random access
+// a) Reading with file APIs such as fread()
+// b) mmaping the file and jumping around the mapped region
+// 2) Reading with 1-pass sequential access
+// -> A reader will need to build up a data structure of parsed structures
+// as it reads
+//
+// -- Writing --
+// 1) Writing with full random access
+// 2) Writing with 1-pass sequential access
+// -> We must take care not to require updating values that have already
+// been written. We place the variable-length index at the end and do
+// not put any indices into the header to fulfill this constraint.
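Before the comment continues with the model.json note, a concrete picture of the reader side of this contract. A minimal sketch against the PyTorchStreamReader bindings declared further down in this file; the archive path is hypothetical, and the StringVector accessors are assumed to be the usual ones JavaCPP generates:

    import org.bytedeco.pytorch.PyTorchStreamReader;
    import org.bytedeco.pytorch.StringVector;

    public class ListRecords {
        public static void main(String[] args) {
            PyTorchStreamReader reader = new PyTorchStreamReader("model.pt"); // hypothetical archive
            StringVector records = reader.getAllRecords();
            for (long i = 0; i < records.size(); i++) {
                String name = records.get(i).getString();
                // offsets are 64-byte aligned when the archive was produced by PyTorchStreamWriter
                System.out.println(name + " @ offset " + reader.getRecordOffset(name));
            }
            System.out.println("archive format version: " + reader.version());
        }
    }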
+
+// The model.json, which contains all the metadata information,
+// should be written as the last file. One reason is that the size of tensor
+// data is usually stable. As long as the shape and type of the tensor do not
+// change, the size of the data won't change. On the other side, the size of the
+// serialized model is likely to change, so we store it as the last record, and
+// we don't need to move previous records when updating the model data.

+// The zip format is sufficiently flexible to handle the above use-case.
+// It puts its central directory at the end of the archive and we write
+// model.json as the last file when writing after we have accumulated all
+// other information.

+@Namespace("caffe2::serialize") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class PyTorchStreamReader extends Pointer {
+ static { Loader.load(); }
+
+ public PyTorchStreamReader(@StdString BytePointer file_name) { super((Pointer)null); allocate(file_name); }
+ private native void allocate(@StdString BytePointer file_name);
+ public PyTorchStreamReader(@StdString String file_name) { super((Pointer)null); allocate(file_name); }
+ private native void allocate(@StdString String file_name);
+ public PyTorchStreamReader(@Cast("std::istream*") Pointer in) { super((Pointer)null); allocate(in); }
+ private native void allocate(@Cast("std::istream*") Pointer in);
+ public PyTorchStreamReader(@SharedPtr ReadAdapterInterface in) { super((Pointer)null); allocate(in); }
+ private native void allocate(@SharedPtr ReadAdapterInterface in);
+
+ // return dataptr, size
+ public native @ByVal T_DataPtrSizeT_T getRecord(@StdString BytePointer name);
+ public native @ByVal T_DataPtrSizeT_T getRecord(@StdString String name);
+ public native @Cast("size_t") long getRecordOffset(@StdString BytePointer name);
+ public native @Cast("size_t") long getRecordOffset(@StdString String name);
+ public native @Cast("bool") boolean hasRecord(@StdString BytePointer name);
+ public native @Cast("bool") boolean hasRecord(@StdString String name);
+ public native @ByVal StringVector getAllRecords();
+ public native @Cast("uint64_t") long version();
+
+ public native void setShouldLoadDebugSymbol(@Cast("bool") boolean should_load_debug_symbol);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonDispatcherTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonDispatcherTLS.java new file mode 100644 index 00000000000..4d34bf06d56
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonDispatcherTLS.java
@@ -0,0 +1,42 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("c10::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class PythonDispatcherTLS extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public PythonDispatcherTLS() { super((Pointer)null); allocate(); }
+ /** Native array allocator.
Access with {@link Pointer#position(long)}. */ + public PythonDispatcherTLS(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public PythonDispatcherTLS(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public PythonDispatcherTLS position(long position) { + return (PythonDispatcherTLS)super.position(position); + } + @Override public PythonDispatcherTLS getPointer(long i) { + return new PythonDispatcherTLS((Pointer)this).offsetAddress(i); + } + + public static native void set_state(PyInterpreter state); + public static native PyInterpreter get_state(); + public static native void reset_state(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonOp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonOp.java index 0301dfe2095..75294cd01ee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonOp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonOp.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonPrint.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonPrint.java deleted file mode 100644 index e4092e41041..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonPrint.java +++ /dev/null @@ -1,49 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PythonPrint extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public PythonPrint(Pointer p) { super(p); } - - public PythonPrint( - @ByRef IValueVector constant_table, - @ByRef PrintDepsTable deps_table, - @ByVal(nullValue = "c10::TypePrinter(nullptr)") @Cast("c10::TypePrinter*") Pointer type_printer, - @Cast("bool") boolean enforce_importable/*=false*/) { super((Pointer)null); allocate(constant_table, deps_table, type_printer, enforce_importable); } - private native void allocate( - @ByRef IValueVector constant_table, - @ByRef PrintDepsTable deps_table, - @ByVal(nullValue = "c10::TypePrinter(nullptr)") @Cast("c10::TypePrinter*") Pointer type_printer, - @Cast("bool") boolean enforce_importable/*=false*/); - public PythonPrint( - @ByRef IValueVector constant_table, - @ByRef PrintDepsTable deps_table) { super((Pointer)null); allocate(constant_table, deps_table); } - private native void allocate( - @ByRef IValueVector constant_table, - @ByRef PrintDepsTable deps_table); - - public native void printNamedType(@Const @SharedPtr @ByRef NamedType classType); - public native void printFunction(@Const @ByRef Function callee); - public native void printMethod(@Const @ByRef Function callee); - - public native @StdString BytePointer str(); - public native @StdVector TaggedRange ranges(); - public native @Cast("uint64_t") long minVersion(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonTorchFunctionTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonTorchFunctionTLS.java new file mode 100644 index 00000000000..936e996fbe3 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PythonTorchFunctionTLS.java @@ -0,0 +1,50 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PythonTorchFunctionTLS extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public PythonTorchFunctionTLS() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public PythonTorchFunctionTLS(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public PythonTorchFunctionTLS(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public PythonTorchFunctionTLS position(long position) { + return (PythonTorchFunctionTLS)super.position(position); + } + @Override public PythonTorchFunctionTLS getPointer(long i) { + return new PythonTorchFunctionTLS((Pointer)this).offsetAddress(i); + } + + public static native void set_disabled_state(TorchFunctionDisabledState disabled_state_); + public static native void set_disabled_state(@Cast("at::impl::TorchFunctionDisabledState") int disabled_state_); + public static native TorchFunctionDisabledState get_disabled_state(); + + public static native void push_onto_stack(@SharedPtr SafePyObject mode); + public static native @SharedPtr SafePyObject pop_stack(); + public static native @SharedPtr SafePyObject get_stack_at(@Cast("int64_t") long idx); + public static native @Cast("int64_t") long stack_len(); + + public static native @Const @ByRef PythonTorchFunctionTLS get_state(); + public static native void set_state(@Const @ByRef PythonTorchFunctionTLS state); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QEngineVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QEngineVector.java index 90b138da863..320553f81e1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QEngineVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QEngineVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class QEngineVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public QEngine front() { return get(0); } + public QEngine back() { return get(size() - 1); } @Index(function = "at") public native @ByRef QEngine get(@Cast("size_t") long i); public native QEngineVector put(@Cast("size_t") long i, QEngine value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeType.java index 98d3e8dc3b9..f730b45bbd3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeTypePtr.java index f1314d3e1e4..4b305187b97 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QSchemeTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE 
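Returning to the QEngineVector hunk just above (and the matching PropertyVector hunk earlier in this patch): the regenerated vector wrappers now expose front()/back() convenience accessors on top of get(). A small sketch, assuming the default constructor these generated wrappers normally provide and the QEngine enum constants from the presets:

    import org.bytedeco.pytorch.QEngineVector;
    import static org.bytedeco.pytorch.global.torch.*; // QEngine, as imported by the generated sources

    static void demoFrontBack() {
        QEngineVector v = new QEngineVector();
        v.resize(2);
        v.put(0, QEngine.FBGEMM);   // enum constants assumed from the presets
        v.put(1, QEngine.NoQEngine);
        QEngine first = v.front();  // new: same element as v.get(0)
        QEngine last  = v.back();   // new: same element as v.get(v.size() - 1)
        System.out.println(first + " .. " + last);
    }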
package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DeprecatedTypeProperties.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QTensorImpl.java similarity index 70% rename from pytorch/src/gen/java/org/bytedeco/pytorch/DeprecatedTypeProperties.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/QTensorImpl.java index 2fc86c49828..d3aac84b76b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DeprecatedTypeProperties.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QTensorImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,9 +18,9 @@ import static org.bytedeco.pytorch.global.torch.*; @Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class DeprecatedTypeProperties extends Pointer { +public class QTensorImpl extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public DeprecatedTypeProperties() { super((Pointer)null); } + public QTensorImpl() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public DeprecatedTypeProperties(Pointer p) { super(p); } + public QTensorImpl(Pointer p) { super(p); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedName.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedName.java index 9435c19b6fc..ea9aeb3c5b8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedName.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedName.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -67,7 +69,7 @@ public class QualifiedName extends Pointer { public native @Const @ByRef StringVector atoms(); - + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef QualifiedName other); - + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef QualifiedName other); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedNameOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedNameOptional.java index 31aae37640d..b8a39a9be17 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedNameOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QualifiedNameOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class QualifiedNameOptional extends Pointer { public native @Name("operator =") @ByRef QualifiedNameOptional put(@ByRef QualifiedNameOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef QualifiedName get(); @ValueSetter public native QualifiedNameOptional put(@ByRef QualifiedName value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Quantizer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Quantizer.java index 1ec640c3013..8d0a1c6279c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Quantizer.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Quantizer.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,10 +17,66 @@ import static org.bytedeco.pytorch.global.torch.*; - @Namespace("at") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) + +/** + * Quantizer is the class for storing all the information + * that's necessary to perform quantize and dequantize + * operation. 
+ * + * We might have different types of quantization schemes and this is + * the base class for all quantizers. + * + * QTensorImpl will hold a pointer to Quantizer so that we can support + * different quantization schemes on Tensor. + * + * For example, the most common quantization scheme, Affine Quantization, + * requires scale and zero_point as parameters; we'll store scale and zero_point + * inside the instance and we can use them to quantize a float Tensor or + * dequantize a quantized Tensor. + * + * When you add a new type of leaf Quantizer class, please also + * make sure to add a corresponding QScheme enum, since + * they should have a one-to-one mapping. + * + * Note about intrusive_ptr: + * Quantized Tensor holds an intrusive_ptr to Quantizer, and multiple Tensors can + * share the same Quantizer. Quantizer should be immutable. + */ +@Namespace("at") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Quantizer extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public Quantizer() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Quantizer(Pointer p) { super(p); } - } + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Quantizer(Pointer p) { super(p); } + + @MemberGetter public native ScalarType scalar_type_(); + + // Copied from torch/csrc/jit/ir/scope.h + public native @ByVal QuantizerPtr intrusive_from_this(); + + /** + * Each concrete Quantizer type should have a unique QScheme type. + */ + public native QScheme qscheme(); + + public native ScalarType scalar_type(); + + /** + * Quantize a float Tensor into a quantized Tensor. + */ + public native @ByVal Tensor quantize(@Const @ByRef Tensor t); + + /** + * Dequantize a quantized Tensor into a float Tensor. + */ + public native @ByVal Tensor dequantize(@Const @ByRef Tensor t); + + /** + * Dequantize a quantized Tensor into a float Tensor (out= variant). + */ + public native @ByRef Tensor dequantize_out(@ByRef Tensor out, @Const @ByRef Tensor t); + + /** + * Compare against {@code other} for equality. + */ + public native @Cast("bool") boolean equalTo(@ByVal QuantizerPtr other); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerPtr.java new file mode 100644 index 00000000000..aac73f96678 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerPtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class QuantizerPtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public QuantizerPtr(Pointer p) { super(p); } + /** Native array allocator.
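// A hedged sketch of the Quantizer API mapped above; `qptr` and `floatTensor`
// are assumed inputs, not part of this patch:
static Tensor quantizeRoundTrip(QuantizerPtr qptr, Tensor floatTensor) {
    Quantizer q = qptr.get();                   // borrow; refcount unchanged
    QScheme scheme = q.qscheme();               // unique per concrete Quantizer type
    Tensor quantized = q.quantize(floatTensor); // float -> quantized
    return q.dequantize(quantized);             // quantized -> float
}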
Access with {@link Pointer#position(long)}. */ + public QuantizerPtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public QuantizerPtr position(long position) { + return (QuantizerPtr)super.position(position); + } + @Override public QuantizerPtr getPointer(long i) { + return new QuantizerPtr((Pointer)this).offsetAddress(i); + } + + + public QuantizerPtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public QuantizerPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. + // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public QuantizerPtr(Quantizer target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(Quantizer target, @ByVal DontIncreaseRefcount arg1); + + + + public QuantizerPtr(@ByRef(true) QuantizerPtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) QuantizerPtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) QuantizerPtr put(@ByRef(true) QuantizerPtr rhs); + + public native @NoException(true) Quantizer get(); + + public native @ByRef @Name("operator *") @NoException(true) Quantizer multiply(); + + public native @Name("operator ->") @NoException(true) Quantizer access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef QuantizerPtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into an intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) Quantizer release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counterpart to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal QuantizerPtr reclaim(Quantizer owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal QuantizerPtr reclaim_copy(Quantizer owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside an intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...)
into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead, which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal QuantizerPtr unsafe_steal_from_new(Quantizer raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal QuantizerPtr unsafe_adapt_non_heap_allocated( + Quantizer raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** into an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of the unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal QuantizerPtr unsafe_reclaim_from_nonowning(Quantizer raw_ptr); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerType.java index b7bbd323a6f..1fd336b0b4f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerTypePtr.java index 86bf75eebf2..f82116a60a8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/QuantizerTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java index db81aba69c3..0703438a342 100644 ---
a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSprop.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,10 +35,10 @@ public RMSprop( private native void allocate( @ByVal OptimizerParamGroupVector param_groups); - public RMSprop(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::RMSpropOptions{}") RMSpropOptions defaults) { super((Pointer)null); allocate(params, defaults); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::RMSpropOptions{}") RMSpropOptions defaults); - public RMSprop(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); + public RMSprop(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::RMSpropOptions{}") RMSpropOptions defaults) { super((Pointer)null); allocate(params, defaults); } + private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::RMSpropOptions{}") RMSpropOptions defaults); + public RMSprop(@Cast({"", "std::vector"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); } + private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector params); public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure); public native @ByVal Tensor step(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java index 73c1db5b2be..10ae66a635f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,7 +36,10 @@ public class RMSpropOptions extends OptimizerCloneableRMSpropOptions { public native @Cast("bool*") @ByRef @NoException(true) BoolPointer centered(); - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals( + @Const @ByRef RMSpropOptions lhs, + @Const @ByRef RMSpropOptions rhs); + public boolean equals(RMSpropOptions rhs) { return equals(this, rhs); } public native double get_lr(); public native void set_lr(double lr); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java index 41a5c1b6b52..8d3514ae497 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -40,5 +42,8 @@ public class RMSpropParamState extends OptimizerCloneableRMSpropParamState { public native @ByRef @NoException(true) Tensor grad_avg(); - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals( + @Const @ByRef RMSpropParamState lhs, + @Const @ByRef RMSpropParamState rhs); + public boolean equals(RMSpropParamState rhs) { return equals(this, rhs); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNN.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNN.java deleted file mode 100644 index 98432038c2f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNN.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code RNNImpl}. - * See the documentation for {@code RNNImpl} class to learn what methods it - * provides, and examples of how to use {@code RNN} with {@code torch::nn::RNNOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class RNN extends RNNImplModuleHolder { - static { Loader.load(); } - - public RNN(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public RNN(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
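// A hedged sketch (illustrative, not generated code) of the equality and
// learning-rate accessors added to the RMSprop option classes above:
static void tuneIfIdentical(RMSpropOptions a, RMSpropOptions b) {
    if (a.equals(b)) {              // maps the C++ operator== generated above
        a.set_lr(a.get_lr() * 0.1); // decay the learning rate in place
    }
}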
*/ - public RNN(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/rnn_options_base_mode_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNBaseMode.java similarity index 52% rename from pytorch/src/gen/java/org/bytedeco/pytorch/rnn_options_base_mode_t.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/RNNBaseMode.java index d7975e7a2e6..7d44896fb46 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/rnn_options_base_mode_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNBaseMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,29 +18,29 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class rnn_options_base_mode_t extends Pointer { +public class RNNBaseMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public rnn_options_base_mode_t(Pointer p) { super(p); } - public rnn_options_base_mode_t(kLSTM value) { this(); put(value); } - public rnn_options_base_mode_t(kGRU value) { this(); put(value); } - public rnn_options_base_mode_t(kRNN_TANH value) { this(); put(value); } - public rnn_options_base_mode_t(kRNN_RELU value) { this(); put(value); } - public rnn_options_base_mode_t() { allocate(); } + public RNNBaseMode(Pointer p) { super(p); } + public RNNBaseMode(kLSTM value) { this(); put(value); } + public RNNBaseMode(kGRU value) { this(); put(value); } + public RNNBaseMode(kRNN_TANH value) { this(); put(value); } + public RNNBaseMode(kRNN_RELU value) { this(); put(value); } + public RNNBaseMode() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef rnn_options_base_mode_t put(@ByRef rnn_options_base_mode_t x); + public native @Name("operator =") @ByRef RNNBaseMode put(@ByRef RNNBaseMode x); public @ByRef kLSTM get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kLSTM get0(@ByRef rnn_options_base_mode_t container); - @ValueSetter public native rnn_options_base_mode_t put(@ByRef kLSTM value); + @Namespace @Name("c10::get<0>") public static native @ByRef kLSTM get0(@ByRef RNNBaseMode container); + @ValueSetter public native RNNBaseMode put(@ByRef kLSTM value); public @ByRef kGRU get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kGRU get1(@ByRef rnn_options_base_mode_t container); - @ValueSetter public native rnn_options_base_mode_t put(@ByRef kGRU value); + @Namespace @Name("c10::get<1>") public static native @ByRef kGRU get1(@ByRef RNNBaseMode container); + @ValueSetter public native RNNBaseMode put(@ByRef kGRU value); public @ByRef kRNN_TANH get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kRNN_TANH get2(@ByRef rnn_options_base_mode_t container); - @ValueSetter public native rnn_options_base_mode_t put(@ByRef kRNN_TANH value); + @Namespace @Name("c10::get<2>") public static native @ByRef kRNN_TANH get2(@ByRef RNNBaseMode container); + @ValueSetter public native RNNBaseMode 
put(@ByRef kRNN_TANH value); public @ByRef kRNN_RELU get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kRNN_RELU get3(@ByRef rnn_options_base_mode_t container); - @ValueSetter public native rnn_options_base_mode_t put(@ByRef kRNN_RELU value); + @Namespace @Name("c10::get<3>") public static native @ByRef kRNN_RELU get3(@ByRef RNNBaseMode container); + @ValueSetter public native RNNBaseMode put(@ByRef kRNN_RELU value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCell.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCell.java deleted file mode 100644 index 0db0df1fbb0..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCell.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code RNNCellImpl}. - * See the documentation for {@code RNNCellImpl} class to learn what methods it - * provides, and examples of how to use {@code RNNCell} with - * {@code torch::nn::RNNCellOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class RNNCell extends RNNCellImplModuleHolder { - static { Loader.load(); } - - public RNNCell(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public RNNCell(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNCellImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNCellImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
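// A hedged sketch of the renamed RNNBaseMode variant (formerly
// rnn_options_base_mode_t); it assumes the usual no-argument constructor
// generated for the torch::enumtype tag kLSTM:
static RNNBaseMode lstmMode() {
    RNNBaseMode mode = new RNNBaseMode(new kLSTM()); // stores alternative 0
    kLSTM tag = mode.get0();                         // reads it back via c10::get<0>
    return mode;
}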
*/ - public RNNCell(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java index 5363321ef5f..99afe6a4ac4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -38,11 +40,11 @@ public class RNNCellImpl extends RNNCellImplBase { public RNNCellImpl(Pointer p) { super(p); } public RNNCellImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @NoDeallocator private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public RNNCellImpl(@Const @ByRef RNNCellOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef RNNCellOptions options_); + @SharedPtr private native void allocate(@Const @ByRef RNNCellOptions options_); - public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByVal(nullValue = "at::Tensor{}") Tensor hx); + public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::Tensor{}") Tensor hx); public native @ByVal Tensor forward(@Const @ByRef Tensor input); public native @ByRef RNNCellOptions options(); public native RNNCellImpl options(RNNCellOptions setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java index 3718508e66a..abee4a38f00 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,7 +25,7 @@ public class RNNCellImplBase extends RNNCellImplCloneable { public RNNCellImplBase(Pointer p) { super(p); } public RNNCellImplBase(@Const @ByRef RNNCellOptionsBase options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef RNNCellOptionsBase options_); + private native void allocate(@Const @ByRef RNNCellOptionsBase options_); /** Initializes the parameters of the RNNCell module. 
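// A hedged sketch (illustrative, not generated code) of the RNNCellImpl
// constructor and forward overloads mapped above; sizes are arbitrary:
static Tensor stepCell(Tensor input, Tensor hx) {
    RNNCellImpl cell = new RNNCellImpl(10, 20); // (input_size, hidden_size)
    Tensor next = cell.forward(input, hx);      // explicit hidden state
    Tensor init = cell.forward(input);          // hx defaults to torch::Tensor{}
    return next;
}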
*/ public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java index 6d5547c660e..cfaa49a76d5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class RNNCellImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RNNCellImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr RNNCellImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(RNNCellImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplModuleHolder.java deleted file mode 100644 index ad495be8083..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class RNNCellImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public RNNCellImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. 
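// A hedged sketch of the revised Cloneable surface above: asModule() now goes
// through static_pointer_cast on a shared_ptr, and clone() returns by value:
static Module cloneAsBase(RNNCellImplCloneable cell) {
    Module base = cell.asModule(); // shared_ptr-backed upcast
    return cell.clone();           // deep copy; a DeviceOptional overload also exists
}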
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public RNNCellImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public RNNCellImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNCellImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNCellImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") RNNCellImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") RNNCellImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) RNNCellImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native RNNCellImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptions.java index fd434688c84..53f404daad9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,5 +37,5 @@ public class RNNCellOptions extends Pointer { public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer input_size(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer hidden_size(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias(); - public native @ByRef @NoException(true) rnn_nonlinearity_t nonlinearity(); + public native @ByRef @NoException(true) RNNNonlinearity nonlinearity(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptionsBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptionsBase.java index cacf149a23e..75ee1e22645 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptionsBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellOptionsBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java index 5af9ee8a2d8..ea4177f0cda 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,16 +39,16 @@ public class RNNImpl extends RNNImplBase { public RNNImpl(Pointer p) { super(p); } public RNNImpl(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(input_size, hidden_size); } - @NoDeallocator private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); + @SharedPtr private native void allocate(@Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); public RNNImpl(@Const @ByRef RNNOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef RNNOptions options_); + @SharedPtr 
private native void allocate(@Const @ByRef RNNOptions options_); - public native @ByVal TensorTensorTuple forward(@Const @ByRef Tensor input, @ByVal(nullValue = "at::Tensor{}") Tensor hx); - public native @ByVal TensorTensorTuple forward(@Const @ByRef Tensor input); - public native @ByVal PackedSequenceTensorTuple forward_with_packed_input( + public native @ByVal T_TensorTensor_T forward(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::Tensor{}") Tensor hx); + public native @ByVal T_TensorTensor_T forward(@Const @ByRef Tensor input); + public native @ByVal T_PackedSequenceTensor_T forward_with_packed_input( @Const @ByRef PackedSequence packed_input, - @ByVal(nullValue = "at::Tensor{}") Tensor hx); - public native @ByVal PackedSequenceTensorTuple forward_with_packed_input( + @ByVal(nullValue = "torch::Tensor{}") Tensor hx); + public native @ByVal T_PackedSequenceTensor_T forward_with_packed_input( @Const @ByRef PackedSequence packed_input); public native @ByRef RNNOptions options(); public native RNNImpl options(RNNOptions setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java index fe4d6624e76..d3159654188 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,7 +25,7 @@ public class RNNImplBase extends RNNImplCloneable { public RNNImplBase(Pointer p) { super(p); } public RNNImplBase(@Const @ByRef RNNOptionsBase options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef RNNOptionsBase options_); + private native void allocate(@Const @ByRef RNNOptionsBase options_); /** Initializes the parameters of the RNN module. */ public native void reset(); @@ -54,7 +56,7 @@ public class RNNImplBase extends RNNImplCloneable { * called once upon construction, inside {@code reset()}. */ public native void flatten_parameters(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector all_weights(); + public native @Cast({"", "std::vector"}) @StdMove TensorVector all_weights(); /** The RNN's options. 
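// A hedged sketch of the renamed tuple return above (TensorTensorTuple ->
// T_TensorTensor_T); the get0()/get1() accessors are assumed to follow the
// usual JavaCPP tuple mapping:
static Tensor runRNN(RNNImpl rnn, Tensor input) {
    T_TensorTensor_T out = rnn.forward(input); // hx defaults to torch::Tensor{}
    Tensor outputs = out.get0();               // per-step outputs
    Tensor hidden  = out.get1();               // final hidden state
    return outputs;
}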
*/ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java index d11d5ffe9d8..dd945e792e3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class RNNImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RNNImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr RNNImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(RNNImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplModuleHolder.java deleted file mode 100644 index f228e51a1ef..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class RNNImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public RNNImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. 
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public RNNImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public RNNImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RNNImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") RNNImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") RNNImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) RNNImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native RNNImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/rnn_nonlinearity_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNNonlinearity.java similarity index 56% rename from pytorch/src/gen/java/org/bytedeco/pytorch/rnn_nonlinearity_t.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/RNNNonlinearity.java index fb3c3140cd1..c9a07f3d0e2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/rnn_nonlinearity_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNNonlinearity.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,21 +18,21 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class rnn_nonlinearity_t extends Pointer { +public class RNNNonlinearity extends Pointer { static { Loader.load(); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ - public rnn_nonlinearity_t(Pointer p) { super(p); } - public rnn_nonlinearity_t(kTanh value) { this(); put(value); } - public rnn_nonlinearity_t(kReLU value) { this(); put(value); } - public rnn_nonlinearity_t() { allocate(); } + public RNNNonlinearity(Pointer p) { super(p); } + public RNNNonlinearity(kTanh value) { this(); put(value); } + public RNNNonlinearity(kReLU value) { this(); put(value); } + public RNNNonlinearity() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef rnn_nonlinearity_t put(@ByRef rnn_nonlinearity_t x); + public native @Name("operator =") @ByRef RNNNonlinearity put(@ByRef RNNNonlinearity x); public @ByRef kTanh get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kTanh get0(@ByRef rnn_nonlinearity_t container); - @ValueSetter public native rnn_nonlinearity_t put(@ByRef kTanh value); + @Namespace @Name("c10::get<0>") public static native @ByRef kTanh get0(@ByRef RNNNonlinearity container); + @ValueSetter public native RNNNonlinearity put(@ByRef kTanh value); public @ByRef kReLU get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kReLU get1(@ByRef rnn_nonlinearity_t container); - @ValueSetter public native rnn_nonlinearity_t put(@ByRef kReLU value); + @Namespace @Name("c10::get<1>") public static native @ByRef kReLU get1(@ByRef RNNNonlinearity container); + @ValueSetter public native RNNNonlinearity put(@ByRef kReLU value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptions.java index e50ed1ee3a7..fd16df7d120 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,7 +37,7 @@ public class RNNOptions extends Pointer { public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer input_size(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer hidden_size(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer num_layers(); - public native @ByRef @NoException(true) rnn_nonlinearity_t nonlinearity(); + public native @ByRef @NoException(true) RNNNonlinearity nonlinearity(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer bias(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer batch_first(); public native @ByRef @NoException(true) DoublePointer dropout(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptionsBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptionsBase.java index 2d7c9999819..d9b33d127df 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptionsBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNOptionsBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import 
org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -25,14 +27,14 @@ public class RNNOptionsBase extends Pointer { public RNNOptionsBase( - @ByVal rnn_options_base_mode_t mode, + @ByVal RNNBaseMode mode, @Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size) { super((Pointer)null); allocate(mode, input_size, hidden_size); } private native void allocate( - @ByVal rnn_options_base_mode_t mode, + @ByVal RNNBaseMode mode, @Cast("int64_t") long input_size, @Cast("int64_t") long hidden_size); - public native @ByRef @NoException(true) rnn_options_base_mode_t mode(); + public native @ByRef @NoException(true) RNNBaseMode mode(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer input_size(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer hidden_size(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer num_layers(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLU.java deleted file mode 100644 index b8ed2877a5e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLU.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code RReLUImpl}. - * See the documentation for {@code RReLUImpl} class to learn what methods it - * provides, and examples of how to use {@code RReLU} with {@code torch::nn::RReLUOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class RReLU extends RReLUImplModuleHolder { - static { Loader.load(); } - - public RReLU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public RReLU(@SharedPtr @Cast({"", "std::shared_ptr"}) RReLUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RReLUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
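// A hedged sketch of RNNOptions with the renamed RNNNonlinearity variant above;
// the (input_size, hidden_size) constructor and the no-argument kReLU
// constructor are assumptions based on the usual mapping:
static RNNOptions reluOptions() {
    RNNOptions opts = new RNNOptions(10, 20);
    opts.nonlinearity().put(new kReLU()); // selects the kReLU alternative
    return opts;
}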
*/ - public RReLU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUFuncOptions.java index 07610c31a02..4fb74faa1ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java index aa7d98790b9..bfdb73b3aee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class RReLUImpl extends RReLUImplCloneable { } public RReLUImpl(@Const @ByRef(nullValue = "torch::nn::RReLUOptions{}") RReLUOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::RReLUOptions{}") RReLUOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::RReLUOptions{}") RReLUOptions options_); public RReLUImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java index b8aa585e1c3..be4449ad6ce 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class RReLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
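// A hedged sketch (illustrative, not generated code): from the caller's side
// the RReLUImpl allocation change (@NoDeallocator -> @SharedPtr) is transparent:
static Tensor applyRReLU(Tensor input) {
    RReLUImpl rrelu = new RReLUImpl(); // now allocated through a shared_ptr
    return rrelu.forward(input);
}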
*/ public RReLUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr RReLUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(RReLUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplModuleHolder.java deleted file mode 100644 index 1863009ced1..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class RReLUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public RReLUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public RReLUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public RReLUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) RReLUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) RReLUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") RReLUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") RReLUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) RReLUImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native RReLUImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUOptions.java index ec4acb660d1..08e05b41550 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterface.java index b3c8892f6d1..a9a89ba70ec 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterface.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,10 +17,33 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("c10") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) + +// This abstract class contains only user-facing APIs, and will be shared +// between jit and distributed to implement TorchScript support. +@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class RRefInterface extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. 
*/ - public RRefInterface() { super((Pointer)null); } + static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public RRefInterface(Pointer p) { super(p); } + + // RRef is made NOT copyable NOT movable to prevent messing up reference + // counting. + + + + + // returns the worker id of the owner + public native @Cast("c10::worker_id_t") short owner(); + + // returns the worker name of the owner + public native @StdString BytePointer ownerName(); + + // Returns true if this is the ``OwnerRRef`` + public native @Cast("bool") boolean isOwner(); + + // Returns true if this is an ``OwnerRRef`` or if this ``UserRRef`` has been + // confirmed by its owner. + public native @Cast("bool") boolean confirmedByOwner(); + + public native @Const @ByVal Type.TypePtr type(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterfacePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterfacePtr.java new file mode 100644 index 00000000000..d1292685565 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefInterfacePtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class RRefInterfacePtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public RRefInterfacePtr(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public RRefInterfacePtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public RRefInterfacePtr position(long position) { + return (RRefInterfacePtr)super.position(position); + } + @Override public RRefInterfacePtr getPointer(long i) { + return new RRefInterfacePtr((Pointer)this).offsetAddress(i); + } + + + public RRefInterfacePtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public RRefInterfacePtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. 
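+ // The same no-incref contract appears in the release()/reclaim() pair
+ // declared further below; a hedged sketch (ptr is assumed to be a live
+ // RRefInterfacePtr):
+ //   RRefInterface raw = ptr.release();                      // refcount unchanged
+ //   RRefInterfacePtr again = RRefInterfacePtr.reclaim(raw); // takes ownership back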
+ // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public RRefInterfacePtr(RRefInterface target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(RRefInterface target, @ByVal DontIncreaseRefcount arg1); + + + + public RRefInterfacePtr(@ByRef(true) RRefInterfacePtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) RRefInterfacePtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) RRefInterfacePtr put(@ByRef(true) RRefInterfacePtr rhs); + + public native @NoException(true) RRefInterface get(); + + public native @ByRef @Name("operator *") @NoException(true) RRefInterface multiply(); + + public native @Name("operator ->") @NoException(true) RRefInterface access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef RRefInterfacePtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into a intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) RRefInterface release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal RRefInterfacePtr reclaim(RRefInterface owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal RRefInterfacePtr reclaim_copy(RRefInterface owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside a intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal RRefInterfacePtr unsafe_steal_from_new(RRefInterface raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. 
+ * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal RRefInterfacePtr unsafe_adapt_non_heap_allocated( + RRefInterface raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal RRefInterfacePtr unsafe_reclaim_from_nonowning(RRefInterface raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefSingleElementType.java index 59bc6dac15c..a0ed979278a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefSingleElementType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefSingleElementType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefType.java index 1e67f6c3542..a910d39fb29 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RRefType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RRefType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Raise.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Raise.java index ad7ff36d6e0..ed6f04483b8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Raise.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Raise.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Raise extends Stmt { 
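  // A hedged construction sketch for this tree node (the TreeRef-based
  // constructor and the create() factory appear just below; range and expr are
  // assumed to be an existing SourceRange and Expr):
  //   Raise raise = Raise.create(range, expr);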
static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Raise(Pointer p) { super(p); } - public Raise(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Raise(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr expr(); public static native @ByVal Raise create(@Const @ByRef SourceRange range, @Const @ByRef Expr expr); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java index 41ba7f63b2c..03af2c42232 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java index 7cd29b6a23f..9d8db731aa8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,16 +46,16 @@ private native void allocate( public native @StdString BytePointer kind(); public native Value len(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); - public native @SharedPtr @ByVal SugaredValue getitem( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue getitem( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, Value idx, @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); - public native @SharedPtr @ByVal SugaredValue getitem( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue getitem( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, Value idx); - public native @SharedPtr @ByVal SugaredValue iter(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue iter(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); // When Range is instantiated via enumerate(iterable_with_static_len), // then it takes the static length of the iterable diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU.java deleted file mode 100644 index 6aea7b41e11..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package 
org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ReLUImpl}. - * See the documentation for {@code ReLUImpl} class to learn what methods it - * provides, and examples of how to use {@code ReLU} with {@code torch::nn::ReLUOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReLU extends ReLUImplModuleHolder { - static { Loader.load(); } - - public ReLU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ReLU(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReLU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6.java deleted file mode 100644 index 0c05223dadf..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ReLU6Impl}. - * See the documentation for {@code ReLU6Impl} class to learn what methods it - * provides, and examples of how to use {@code ReLU6} with {@code torch::nn::ReLU6Options}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReLU6 extends ReLU6ImplModuleHolder { - static { Loader.load(); } - - public ReLU6(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ReLU6(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLU6Impl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLU6Impl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ReLU6(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java index ab9af544c4b..02942be581a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Impl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class ReLU6Impl extends ReLU6ImplCloneable { } public ReLU6Impl(@Const @ByRef(nullValue = "torch::nn::ReLU6Options{}") ReLU6Options options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLU6Options{}") ReLU6Options options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLU6Options{}") ReLU6Options options_); public ReLU6Impl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java index 85549312fef..c492dc875fe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ReLU6ImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReLU6ImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReLU6ImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ReLU6ImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
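 * <p>A hedged sketch contrasting the shared upcast with the deep copy (impl is
 * assumed to be a constructed {@code ReLU6Impl}):
 * <pre>{@code
 * Module shared = impl.asModule(); // same underlying native object
 * Module deep = impl.clone();      // recursively duplicated parameters
 * }</pre>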
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplModuleHolder.java deleted file mode 100644 index 4e9d0a0da82..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReLU6ImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReLU6ImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ReLU6ImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ReLU6ImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLU6Impl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLU6Impl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ReLU6Impl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ReLU6Impl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ReLU6Impl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native ReLU6Impl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Options.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Options.java index abb78e544c5..75e312bac0c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Options.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6Options.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java index 7e4c2bd2659..35b4d458242 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class ReLUImpl extends ReLUImplCloneable { } public ReLUImpl(@Const @ByRef(nullValue = "torch::nn::ReLUOptions{}") ReLUOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLUOptions{}") ReLUOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::ReLUOptions{}") ReLUOptions options_); public ReLUImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java index fecae5d92a6..a043cbacc53 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ReLUImplCloneable extends 
Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReLUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReLUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ReLUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplModuleHolder.java deleted file mode 100644 index acee7321095..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReLUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReLUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ReLUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public ReLUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReLUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ReLUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ReLUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ReLUImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ReLUImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUOptions.java index fad1ec5402c..3db6b1bcba6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterface.java index e810f6a36c5..de7070baf6a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReadAdapterInterface.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,10 +17,18 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("caffe2::serialize") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) + +// this is the interface for the (file/stream/memory) reader in +// PyTorchStreamReader. with this interface, we can extend the support +// besides standard istream +@Namespace("caffe2::serialize") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class ReadAdapterInterface extends Pointer { - /** Empty constructor. 
Calls {@code super((Pointer)null)}. */ - public ReadAdapterInterface() { super((Pointer)null); } + static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReadAdapterInterface(Pointer p) { super(p); } + + public native @Cast("size_t") long size(); + public native @Cast("size_t") long read(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long n, @Cast("const char*") BytePointer what/*=""*/); + public native @Cast("size_t") long read(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long n); + public native @Cast("size_t") long read(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long n, String what/*=""*/); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReadyQueue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReadyQueue.java new file mode 100644 index 00000000000..eb6145b8fd2 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReadyQueue.java @@ -0,0 +1,26 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Namespace("torch::autograd") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ReadyQueue extends Pointer { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public ReadyQueue() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ReadyQueue(Pointer p) { super(p); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunction.java index 33c54cee26f..b3f4ff05383 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunction.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -53,13 +55,13 @@ public class RecordFunction extends Pointer { public native @Cast("int64_t") long seqNr(); - public native @ByVal @Cast("c10::ArrayRef*") IValueArrayRef inputs(); + public native @ByVal IValueArrayRef inputs(); public native @Const @ByRef IValueVector outputs(); public native void setOutputs(@ByRef(true) IValueVector outputs); - public native void setOutputs(@ByVal @Cast("c10::ArrayRef*") IValueArrayRef outputs); + public native void setOutputs(@ByVal IValueArrayRef outputs); public native @Cast("size_t") long num_inputs(); public native @Cast("size_t") long num_outputs(); @@ -105,23 +107,23 @@ public class RecordFunction extends Pointer { // Internal-only, used only force async event for distributed events // profiling. 
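 // A hedged query sketch for the accessors kept below (rf is assumed to be a
 // RecordFunction handed to a profiler callback):
 //   if (rf.isActive() && rf.isAsync()) { long h = rf.handle(); }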
- public native void _setAsync(); + // Returns whether this RecordFunction corresponds to an async event or not. public native @Cast("bool") boolean isAsync(); // Internal-only, used to denote out variant used for Static Runtime execution - public native void _setStaticRuntimeOutVariant(); + public native @Cast("bool") boolean isStaticRuntimeOutVariant(); - public native @Cast("at::RecordFunctionHandle") long handle(); + public native long handle(); public native @ByVal OperatorNameOptional operator_name(); // This method returns a copy of the FunctionSchema and can be expensive. public native @ByVal FunctionSchemaOptional operator_schema(); - public native void setHandle(@Cast("at::RecordFunctionHandle") long handle); + public native void setHandle(long handle); // Whether this RecordFunction runs any callbacks. public native @Cast("bool") boolean isActive(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbackHandleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbackHandleVector.java index 16189066c31..ba394961e7c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbackHandleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbackHandleVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbacksEntry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbacksEntry.java index 6d1a6d4f10d..0c788bf494e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbacksEntry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionCallbacksEntry.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionGuard.java index a62a2a48a21..a4cb3712646 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntList.java
new file mode 100644 index 00000000000..4482bd53307 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntList.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::list >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class RecordFunctionHandleIntList extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public RecordFunctionHandleIntList(Pointer p) { super(p); } + public RecordFunctionHandleIntList() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef RecordFunctionHandleIntList put(@ByRef RecordFunctionHandleIntList x); + + public boolean empty() { return size() == 0; } + public native long size(); + + public RecordFunctionHandleIntPair front() { try (Iterator it = begin()) { return it.get(); } } + public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef RecordFunctionHandleIntPair value); + public native @ByVal Iterator erase(@ByVal Iterator pos); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const RecordFunctionHandleIntPair get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntPair.java new file mode 100644 index 00000000000..ba8b46a7db0 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionHandleIntPair.java @@ -0,0 +1,40 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::pair") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class RecordFunctionHandleIntPair extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
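+ * <p>A small usage sketch for this {@code std::pair} mapping (the constructor
+ * and accessors are declared below; the values are hypothetical):
+ * <pre>{@code
+ * RecordFunctionHandleIntPair p = new RecordFunctionHandleIntPair(42L, 7);
+ * long handle = p.first(); // pair.first
+ * int count = p.second();  // pair.second
+ * }</pre>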
*/ + public RecordFunctionHandleIntPair(Pointer p) { super(p); } + public RecordFunctionHandleIntPair(long firstValue, int secondValue) { this(); put(firstValue, secondValue); } + public RecordFunctionHandleIntPair() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef RecordFunctionHandleIntPair put(@ByRef RecordFunctionHandleIntPair x); + + + @MemberGetter public native long first(); public native RecordFunctionHandleIntPair first(long first); + @MemberGetter public native int second(); public native RecordFunctionHandleIntPair second(int second); + + public RecordFunctionHandleIntPair put(long firstValue, int secondValue) { + first(firstValue); + second(secondValue); + return this; + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionTLS.java index d2554ebc289..9cd4e8b4e13 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionTLS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordFunctionTLS.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java index 9214c63b9ee..f1f13f8161a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RecordScopeSet.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("std::unordered_set >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("std::unordered_set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class RecordScopeSet extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
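 * <p>A hedged peek at the container API (the default constructor is assumed
 * from the unchanged part of this class; {@code front()} is the addition below):
 * <pre>{@code
 * RecordScopeSet scopes = new RecordScopeSet();
 * if (!scopes.empty()) { RecordScope first = scopes.front(); }
 * }</pre>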
*/ @@ -27,6 +29,7 @@ public class RecordScopeSet extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public RecordScope front() { try (Iterator it = begin()) { return it.get(); } } public native void insert(@ByRef RecordScope value); public native void erase(@ByRef RecordScope value); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1d.java deleted file mode 100644 index 335b9b876aa..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ReflectionPad1dImpl}. - * See the documentation for {@code ReflectionPad1dImpl} class to learn what methods - * it provides, and examples of how to use {@code ReflectionPad1d} with - * {@code torch::nn::ReflectionPad1dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReflectionPad1d extends ReflectionPad1dImplModuleHolder { - static { Loader.load(); } - - public ReflectionPad1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ReflectionPad1d(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ReflectionPad1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java index 639d8c80ba7..49c1a5e76a2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class ReflectionPad1dImpl extends ReflectionPad1dImplBase { public ReflectionPad1dImpl(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); public ReflectionPad1dImpl(@Const @ByRef ReflectionPad1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReflectionPad1dOptions options_); + private native void allocate(@Const @ByRef ReflectionPad1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad1dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java index d400fe60566..5997c6a5210 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -24,9 +26,9 @@ public class ReflectionPad1dImplBase extends ReflectionPad1dImplCloneable { public ReflectionPad1dImplBase(Pointer p) { super(p); } public ReflectionPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); public ReflectionPad1dImplBase(@Const @ByRef ReflectionPad1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReflectionPad1dOptions options_); + private native void allocate(@Const @ByRef ReflectionPad1dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java index 
b9c83f09cc3..9735c421b6c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ReflectionPad1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReflectionPad1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ReflectionPad1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplModuleHolder.java deleted file mode 100644 index 32c60dfdb4f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReflectionPad1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReflectionPad1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. 
*/ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ReflectionPad1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ReflectionPad1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ReflectionPad1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ReflectionPad1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad1dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ReflectionPad1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dOptions.java index 3c72a6fa1f9..75460319c93 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2d.java deleted file mode 100644 index dd57228aac3..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ReflectionPad2dImpl}. - * See the documentation for {@code ReflectionPad2dImpl} class to learn what methods - * it provides, and examples of how to use {@code ReflectionPad2d} with - * {@code torch::nn::ReflectionPad2dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReflectionPad2d extends ReflectionPad2dImplModuleHolder { - static { Loader.load(); } - - public ReflectionPad2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ReflectionPad2d(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ReflectionPad2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java index 3338c12f9f5..16cd9b8d0d1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class ReflectionPad2dImpl extends ReflectionPad2dImplBase { public ReflectionPad2dImpl(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); public ReflectionPad2dImpl(@Const @ByRef ReflectionPad2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReflectionPad2dOptions options_); + private native void allocate(@Const @ByRef ReflectionPad2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad2dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java index 8f44e0d3a4a..46f9d030849 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class ReflectionPad2dImplBase extends ReflectionPad2dImplCloneable { public ReflectionPad2dImplBase(Pointer p) { super(p); } public ReflectionPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); public ReflectionPad2dImplBase(@Const @ByRef ReflectionPad2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReflectionPad2dOptions options_); + private native void allocate(@Const @ByRef ReflectionPad2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java index 
c24f373b429..16f8858b1e3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ReflectionPad2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReflectionPad2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ReflectionPad2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplModuleHolder.java deleted file mode 100644 index 5669ceda0c0..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReflectionPad2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReflectionPad2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. 
*/ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ReflectionPad2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ReflectionPad2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ReflectionPad2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ReflectionPad2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ReflectionPad2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dOptions.java index e20a7ac48b7..16ab25dcce3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3d.java deleted file mode 100644 index bff94583884..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ReflectionPad3dImpl}. - * See the documentation for {@code ReflectionPad3dImpl} class to learn what methods - * it provides, and examples of how to use {@code ReflectionPad3d} with - * {@code torch::nn::ReflectionPad3dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReflectionPad3d extends ReflectionPad3dImplModuleHolder { - static { Loader.load(); } - - public ReflectionPad3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ReflectionPad3d(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ReflectionPad3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java index 16f785b6fde..c35516a1d69 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,9 +39,9 @@ public class ReflectionPad3dImpl extends ReflectionPad3dImplBase { public ReflectionPad3dImpl(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); public ReflectionPad3dImpl(@Const @ByRef ReflectionPad3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReflectionPad3dOptions options_); + private native void allocate(@Const @ByRef ReflectionPad3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad3dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java index 9d8c0d0c5c5..09103a09a04 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class ReflectionPad3dImplBase extends ReflectionPad3dImplCloneable { public ReflectionPad3dImplBase(Pointer p) { super(p); } public ReflectionPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); public ReflectionPad3dImplBase(@Const @ByRef ReflectionPad3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReflectionPad3dOptions options_); + private native void allocate(@Const @ByRef ReflectionPad3dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java index 
cc00c7307d1..47f7e78d0f8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ReflectionPad3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReflectionPad3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReflectionPad3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ReflectionPad3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplModuleHolder.java deleted file mode 100644 index 61d18e71b80..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReflectionPad3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReflectionPad3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. 
*/ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ReflectionPad3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ReflectionPad3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ReflectionPad3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ReflectionPad3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ReflectionPad3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ReflectionPad3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dOptions.java index 5002072f5ae..ad2c8bef95c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RegisterOperators.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RegisterOperators.java index bc903683611..b09ceb054b4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RegisterOperators.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RegisterOperators.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,13 +18,26 @@ import static org.bytedeco.pytorch.global.torch.*; -/** Registration class for new operators. Effectively calls - * {@code torch::jit::registerOperator} for every supplied operator, but allows doing - * so in the global scope when a {@code RegisterOperators} object is assigned to a - * static variable. - * Note: This is *not* the custom operator API. If you want to register custom - * operators, take a look at torch::RegisterOperators. */ -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +/** + * An instance of this class handles the registration for one or more operators. + * Make sure you keep the RegisterOperators instance around since it will + * deregister the operator it's responsible for in its destructor. + * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + */ +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class RegisterOperators extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -40,9 +55,87 @@ public class RegisterOperators extends Pointer { public RegisterOperators() { super((Pointer)null); allocate(); } private native void allocate(); - /** Registers a vector of already created {@code Operator}s. - * The operator element is now optional to filter null ops. It's backward - * compatible and works for selective operator registration. 
*/ - public RegisterOperators(@ByVal OperatorOptionalVector operators) { super((Pointer)null); allocate(operators); } - private native void allocate(@ByVal OperatorOptionalVector operators); + + + public RegisterOperators(@ByRef(true) RegisterOperators arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByRef(true) RegisterOperators arg0); + public native @ByRef @Name("operator =") @NoException(true) RegisterOperators put(@ByRef(true) RegisterOperators arg0); + + /** + * Call this to get an instance of registration options, which + * can be passed to a call to RegisterOperators::op() to specify + * these options for the operator registration. + * See class doc comment for examples. + */ + + /** + * Call this to register an operator. See class doc comment for examples. + */ + + + // Regular mutator version of the && version above + + /** + * This is a shorthand for RegisterOperators::op(Options) where you can + * specify the operator schema outside of the options parameter. + * See class doc comment for examples. + */ + + + // internal only for registering caffe2 ops + + + /** + * This API registers an operator based on a kernel function pointer. + * + * Given a kernel + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * + * This API looks like: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", &my_kernel_cpu); + * + * If your kernel is small and the overhead of calling it matters, + * then this API might be the wrong choice since the following API + * has a slightly lower overhead for calling into the kernel: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>()); + * + * Or, alternatively, write your kernel as a functor: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .kernel<my_kernel_cpu>()); + */ + + + /** + * This API registers an operator based on a kernel lambda.
+ * + * This API looks like: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", [] (Tensor a, Tensor b) {...}); + * + * This is equivalent to: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .catchAllKernel([] (Tensor a, Tensor b) {...})); + * + */ + + + } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationHandleRAII.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationHandleRAII.java index 9dcae913c84..79fe91d0638 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationHandleRAII.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationHandleRAII.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationListenerList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationListenerList.java index 990fe4fc45b..6aadfb8e0f6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationListenerList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RegistrationListenerList.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1d.java deleted file mode 100644 index a8c2d9deab5..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ReplicationPad1dImpl}. - * See the documentation for {@code ReplicationPad1dImpl} class to learn what methods - * it provides, and examples of how to use {@code ReplicationPad1d} with - * {@code torch::nn::ReplicationPad1dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. 
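As with the other holder classes deleted in this patch, ReplicationPad1d users now construct the Impl class directly. A hedged sketch using only the constructor shown in the ReplicationPad1dImpl hunk below; the padding values are illustrative, and the varargs LongPointer constructor is standard JavaCPP (ExpandingArray<1*2> expects two longs):

    import org.bytedeco.javacpp.LongPointer;
    import org.bytedeco.pytorch.ReplicationPad1dImpl;

    public class PadSketch {
        public static void main(String[] args) {
            // Two values for ExpandingArray<1*2>: padding_left, padding_right.
            ReplicationPad1dImpl pad = new ReplicationPad1dImpl(new LongPointer(1L, 2L));
        }
    }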
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReplicationPad1d extends ReplicationPad1dImplModuleHolder { - static { Loader.load(); } - - public ReplicationPad1d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ReplicationPad1d(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReplicationPad1d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java index 9aa251f8eed..92e34930a02 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class ReplicationPad1dImpl extends ReplicationPad1dImplBase { public ReplicationPad1dImpl(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); public ReplicationPad1dImpl(@Const @ByRef ReplicationPad1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReplicationPad1dOptions options_); + private native void allocate(@Const @ByRef ReplicationPad1dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public ReplicationPad1dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java index 0d05a4ec47b..e70212d7a48 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,9 +28,9 @@ public class ReplicationPad1dImplBase extends ReplicationPad1dImplCloneable { public ReplicationPad1dImplBase(Pointer p) { super(p); } public ReplicationPad1dImplBase(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<1*2>*") LongPointer padding); public ReplicationPad1dImplBase(@Const @ByRef ReplicationPad1dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReplicationPad1dOptions options_); + private native void allocate(@Const @ByRef ReplicationPad1dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java index 353427e8a2a..0f549136e37 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ReplicationPad1dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad1dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReplicationPad1dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ReplicationPad1dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplModuleHolder.java deleted file mode 100644 index 766ef1c1965..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder<torch::nn::ReplicationPad1dImpl>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReplicationPad1dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReplicationPad1dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ReplicationPad1dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared<LinearImpl>(...))}. */ - /* implicit */ public ReplicationPad1dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::ReplicationPad1dImpl>"}) ReplicationPad1dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::ReplicationPad1dImpl>"}) ReplicationPad1dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ReplicationPad1dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ReplicationPad1dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module.
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad1dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ReplicationPad1dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dOptions.java index 2d5f942e87d..dab75a85697 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2d.java deleted file mode 100644 index 0d353fc30a8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ReplicationPad2dImpl}. - * See the documentation for {@code ReplicationPad2dImpl} class to learn what methods - * it provides, and examples of how to use {@code ReplicationPad2d} with - * {@code torch::nn::ReplicationPad2dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReplicationPad2d extends ReplicationPad2dImplModuleHolder { - static { Loader.load(); } - - public ReplicationPad2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ReplicationPad2d(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ReplicationPad2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java index 206b94a8e8e..10dd3c52483 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class ReplicationPad2dImpl extends ReplicationPad2dImplBase { public ReplicationPad2dImpl(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); public ReplicationPad2dImpl(@Const @ByRef ReplicationPad2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReplicationPad2dOptions options_); + private native void allocate(@Const @ByRef ReplicationPad2dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad2dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java index 5b9e75efc8e..494ea96d211 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class ReplicationPad2dImplBase extends ReplicationPad2dImplCloneable { public ReplicationPad2dImplBase(Pointer p) { super(p); } public ReplicationPad2dImplBase(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<2*2>*") LongPointer padding); public ReplicationPad2dImplBase(@Const @ByRef ReplicationPad2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReplicationPad2dOptions options_); + private native void allocate(@Const @ByRef ReplicationPad2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java index a223847557b..3a1611f0543 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ReplicationPad2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReplicationPad2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ReplicationPad2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplModuleHolder.java deleted file mode 100644 index cd2b0f30231..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReplicationPad2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReplicationPad2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. 
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ReplicationPad2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ReplicationPad2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ReplicationPad2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ReplicationPad2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad2dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ReplicationPad2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dOptions.java index 4adf4077b9b..2690f66621c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3d.java deleted file mode 100644 index 7d6c223fa4f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ReplicationPad3dImpl}. - * See the documentation for {@code ReplicationPad3dImpl} class to learn what methods - * it provides, and examples of how to use {@code ReplicationPad3d} with - * {@code torch::nn::ReplicationPad3dOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReplicationPad3d extends ReplicationPad3dImplModuleHolder { - static { Loader.load(); } - - public ReplicationPad3d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ReplicationPad3d(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ReplicationPad3d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java index 7baad538405..8fd9ceb3fa4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class ReplicationPad3dImpl extends ReplicationPad3dImplBase { public ReplicationPad3dImpl(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); public ReplicationPad3dImpl(@Const @ByRef ReplicationPad3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReplicationPad3dOptions options_); + private native void allocate(@Const @ByRef ReplicationPad3dOptions options_); /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad3dImpl(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java index f941a4efd04..f2fc7171517 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,9 +24,9 @@ public class ReplicationPad3dImplBase extends ReplicationPad3dImplCloneable { public ReplicationPad3dImplBase(Pointer p) { super(p); } public ReplicationPad3dImplBase(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); + private native void allocate(@ByVal @Cast("torch::ExpandingArray<3*2>*") LongPointer padding); public ReplicationPad3dImplBase(@Const @ByRef ReplicationPad3dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ReplicationPad3dOptions options_); + private native void allocate(@Const @ByRef ReplicationPad3dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java index d38ba7f69b7..a92e6b9ab89 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ReplicationPad3dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ReplicationPad3dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ReplicationPad3dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ReplicationPad3dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplModuleHolder.java deleted file mode 100644 index 5d17974f97c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ReplicationPad3dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReplicationPad3dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. 
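A minimal usage sketch of the pattern these hunks apply to every generated *ImplCloneable class: the per-module ModuleHolder wrappers are deleted, asModule() becomes a shared_ptr-preserving upcast instead of a plain static_cast, and clone() now returns the Module by value as a shared_ptr. The caller code below is hypothetical and only assumes the constructors and methods shown in the hunks above; the six-element array matches the ExpandingArray<3*2> cast in the ReplicationPad3dImpl constructor.

    // Hypothetical caller code, not part of the generated sources.
    ReplicationPad3dImpl pad = new ReplicationPad3dImpl(
        new LongPointer(1, 1, 1, 1, 1, 1)); // padding, cast to torch::ExpandingArray<3*2>
    Module base = pad.asModule();           // shared_ptr-based upcast introduced by this patch
    Module copy = pad.clone();              // recursive deep copy, returned @ByVal as a shared_ptr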
- * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ReplicationPad3dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ReplicationPad3dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ReplicationPad3dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ReplicationPad3dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ReplicationPad3dImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native ReplicationPad3dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dOptions.java index cd02d22ce1a..71373962382 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java index 9691961cd1a..dbccdd2a605 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Resolver.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -52,11 +54,11 @@ public class Resolver extends Pointer { // Resolve a given name to a SugaredValue. This takes the method `m` that the // caller is currently constructing, since we may need to insert nodes into // the graph to create a value. 
- public native @SharedPtr @ByVal SugaredValue resolveValue( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue resolveValue( @StdString BytePointer name, @ByRef GraphFunction m, @Const @ByRef SourceRange loc); - public native @SharedPtr @ByVal SugaredValue resolveValue( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue resolveValue( @StdString String name, @ByRef GraphFunction m, @Const @ByRef SourceRange loc); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ResolverVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ResolverVector.java index 48e64be3d16..8d5e948ca5f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ResolverVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ResolverVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,10 +35,12 @@ public class ResolverVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @SharedPtr Resolver get(@Cast("size_t") long i); + public Resolver front() { return get(0); } + public Resolver back() { return get(size() - 1); } + @Index(function = "at") public native @SharedPtr("torch::jit::Resolver") Resolver get(@Cast("size_t") long i); public native ResolverVector put(@Cast("size_t") long i, Resolver value); - public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr Resolver value); + public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("torch::jit::Resolver") Resolver value); public native @ByVal Iterator erase(@ByVal Iterator pos); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @@ -46,7 +50,7 @@ public Iterator() { } public native @Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @SharedPtr @Const Resolver get(); + public native @Name("operator *") @SharedPtr("torch::jit::Resolver") @Const Resolver get(); } public Resolver[] get() { diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RecursiveMethodCallError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Result.java similarity index 60% rename from pytorch/src/gen/java/org/bytedeco/pytorch/RecursiveMethodCallError.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/Result.java index b2e058c1c0d..ff3856c5fc9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RecursiveMethodCallError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Result.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,10 +17,10 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("torch::jit") @Opaque 
@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class RecursiveMethodCallError extends Pointer { +@Namespace("torch::profiler::impl") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class Result extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public RecursiveMethodCallError() { super((Pointer)null); } + public Result() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public RecursiveMethodCallError(Pointer p) { super(p); } + public Result(Pointer p) { super(p); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Return.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Return.java index 4b75f917ce9..fa24f65464f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Return.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Return.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Return extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Return(Pointer p) { super(p); } - public Return(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Return(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr expr(); public static native @ByVal Return create(@Const @ByRef SourceRange range, @Const @ByRef Expr value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELU.java deleted file mode 100644 index 2e761907653..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELU.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code SELUImpl}. - * See the documentation for {@code SELUImpl} class to learn what methods it - * provides, and examples of how to use {@code SELU} with {@code torch::nn::SELUOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SELU extends SELUImplModuleHolder { - static { Loader.load(); } - - public SELU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public SELU(@SharedPtr @Cast({"", "std::shared_ptr"}) SELUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SELUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SELU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java index 888cc9d14a7..188a107c800 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class SELUImpl extends SELUImplCloneable { } public SELUImpl(@Const @ByRef(nullValue = "torch::nn::SELUOptions{}") SELUOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::SELUOptions{}") SELUOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::SELUOptions{}") SELUOptions options_); public SELUImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@ByVal Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java index 5186f465786..7bbe3d02e86 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SELUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SELUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SELUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. 
*/ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SELUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplModuleHolder.java deleted file mode 100644 index 0a3ccdbea68..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SELUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SELUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public SELUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public SELUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SELUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SELUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SELUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. 
*/ - public native @ByRef @Name("operator *") SELUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::SELUImpl>"}) SELUImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native SELUImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUOptions.java index d7477d87740..6a15d475651 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java index d835bb6ce1f..7cc3309dca2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SGD.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -29,8 +31,8 @@ private native void allocate( @ByVal OptimizerParamGroupVector param_groups, @ByVal SGDOptions defaults); - public SGD(@Cast({"", "std::vector<Tensor>"}) @StdMove TensorVector params, @ByVal SGDOptions defaults) { super((Pointer)null); allocate(params, defaults); } - private native void allocate(@Cast({"", "std::vector<Tensor>"}) @StdMove TensorVector params, @ByVal SGDOptions defaults); + public SGD(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params, @ByVal SGDOptions defaults) { super((Pointer)null); allocate(params, defaults); } + private native void allocate(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params, @ByVal SGDOptions defaults); public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure); public native @ByVal Tensor step(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java index 03e742987a5..93ffd0adc70 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version
1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -31,7 +33,10 @@ public class SGDOptions extends OptimizerCloneableSGDOptions { public native @Cast("bool*") @ByRef @NoException(true) BoolPointer nesterov(); - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals( + @Const @ByRef SGDOptions lhs, + @Const @ByRef SGDOptions rhs); + public boolean equals(SGDOptions rhs) { return equals(this, rhs); } public native double get_lr(); public native void set_lr(double lr); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java index 9aecd02f4c7..7259baf47a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,5 +39,8 @@ public class SGDParamState extends OptimizerCloneableSGDParamState { public native @ByRef @NoException(true) Tensor momentum_buffer(); - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals( + @Const @ByRef SGDParamState lhs, + @Const @ByRef SGDParamState rhs); + public boolean equals(SGDParamState rhs) { return equals(this, rhs); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyHandle.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyHandle.java new file mode 100644 index 00000000000..0520226ac16 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyHandle.java @@ -0,0 +1,48 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// Like SafePyObject, but non-owning. Good for references to global PyObjects +// that will be leaked on interpreter exit. You get a copy constructor/assign +// this way. +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SafePyHandle extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SafePyHandle(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
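The operator== bridges added to SGDOptions and SGDParamState above surface native equality as a Java equals overload. A small sketch of the resulting call pattern, assuming a learning-rate constructor for SGDOptions from the wider presets (not shown in these hunks) and the other hyperparameters left at their defaults:

    SGDOptions a = new SGDOptions(0.01); // assumed SGDOptions(double lr) constructor
    SGDOptions b = new SGDOptions(0.02);
    b.set_lr(a.get_lr());                // get_lr()/set_lr() appear in the hunk above
    boolean same = a.equals(b);          // delegates to the native operator ==(lhs, rhs)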
*/ + public SafePyHandle(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public SafePyHandle position(long position) { + return (SafePyHandle)super.position(position); + } + @Override public SafePyHandle getPointer(long i) { + return new SafePyHandle((Pointer)this).offsetAddress(i); + } + + public SafePyHandle() { super((Pointer)null); allocate(); } + private native void allocate(); + public SafePyHandle(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter) { super((Pointer)null); allocate(data, pyinterpreter); } + private native void allocate(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter); + + public native @ByRef PyInterpreter pyinterpreter(); + public native @Cast("PyObject*") Pointer ptr(@Const PyInterpreter arg0); + public native void reset(); + public native @Cast("bool") @Name("operator bool") boolean asBoolean(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java new file mode 100644 index 00000000000..21af3c7c18f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java @@ -0,0 +1,50 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// This is a safe owning holder for a PyObject, akin to pybind11's +// py::object, with two major differences: +// +// - It is in c10/core; i.e., you can use this type in contexts where +// you do not have a libpython dependency +// +// - It is multi-interpreter safe (ala torchdeploy); when you fetch +// the underlying PyObject* you are required to specify what the current +// interpreter context is and we will check that you match it. +// +// It is INVALID to store a reference to a Tensor object in this way; +// you should just use TensorImpl directly in that case! +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SafePyObject extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SafePyObject(Pointer p) { super(p); } + + // Steals a reference to data + public SafePyObject(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter) { super((Pointer)null); allocate(data, pyinterpreter); } + private native void allocate(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter); + + // In principle this could be copyable if we add an incref to PyInterpreter + // but for now it's easier to just disallow it.
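Because SafePyObject steals a reference on construction and verifies the interpreter on each fetch, a caller must hand back the same PyInterpreter it constructed with; SafePyHandle, shown above, is the non-owning counterpart with reset() and a boolean conversion. A hedged sketch against the bindings in these two new files (a live PyObject* and PyInterpreter must come from elsewhere, so both are null placeholders here):

    PyInterpreter interp = null;    // placeholder: a real interpreter handle is required at runtime
    Pointer rawPyObject = null;     // placeholder: a PyObject* the caller already owns
    SafePyObject obj = new SafePyObject(rawPyObject, interp); // steals the reference
    Pointer fetched = obj.ptr(interp); // the interpreter passed in must match obj.pyinterpreter()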
+ + + + public native @ByRef PyInterpreter pyinterpreter(); + public native @Cast("PyObject*") Pointer ptr(@Const PyInterpreter arg0); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Sampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Sampler.java index bc8cee2e45b..b66e11a90a6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Sampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Sampler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java index 05109461288..d01ff650096 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java index 08350672c24..0271039621e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java index a55619b0b89..3d5babb2615 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableArrayRef.java index 48eab02877f..d6376ec9ccf 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class SavedVariableArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public SavedVariableArrayRef(@Const @ByRef SavedVariable OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef SavedVariable OneElt); + /** Construct an ArrayRef from a pointer and length. */ public SavedVariableArrayRef(@Const SavedVariable data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -58,6 +59,8 @@ public class SavedVariableArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector<bool>, because ArrayRef can't work on a std::vector<bool> // bitfield. + public SavedVariableArrayRef(@ByRef SavedVariableVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef SavedVariableVector vec); /** Construct an ArrayRef from a std::array */ @@ -70,13 +73,13 @@ public class SavedVariableArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef<torch::autograd::SavedVariable>::iterator*") SavedVariable begin(); - public native @ByVal @Cast("const c10::ArrayRef<torch::autograd::SavedVariable>::iterator*") SavedVariable end(); + public native @Const @ByPtr SavedVariable begin(); + public native @Const @ByPtr SavedVariable end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef<torch::autograd::SavedVariable>::const_iterator*") SavedVariable cbegin(); - public native @ByVal @Cast("const c10::ArrayRef<torch::autograd::SavedVariable>::const_iterator*") SavedVariable cend(); + public native @Const @ByPtr SavedVariable cbegin(); + public native @Const @ByPtr SavedVariable cend(); /** empty - Check if the array is empty.
*/ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableHooks.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableHooks.java index e0973c0eb67..8500ed27a60 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableHooks.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableHooks.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableVector.java index 0a44c6df6d9..4ce52cd9aea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedVariableVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,8 @@ public class SavedVariableVector extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public SavedVariable front() { return get(0); } + public SavedVariable back() { return get(size() - 1); } @Index(function = "at") public native @ByRef SavedVariable get(@Cast("size_t") long i); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java index 98cfa418fe6..2c53b21e50a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -54,6 +56,10 @@ public class Scalar extends Pointer { private native void allocate(@ByVal Half vv); public Scalar(@ByVal BFloat16 vv) { super((Pointer)null); allocate(vv); } private native void allocate(@ByVal BFloat16 vv); + public Scalar(@ByVal FloatComplex vv) { super((Pointer)null); allocate(vv); } + private native void allocate(@ByVal FloatComplex vv); + public Scalar(@ByVal DoubleComplex vv) { super((Pointer)null); allocate(vv); } + private native void allocate(@ByVal DoubleComplex vv); // #undef DEFINE_IMPLICIT_CTOR @@ -89,6 +95,9 @@ public class Scalar extends Pointer { public native @ByVal Half toHalf(); public native float toFloat(); public native double toDouble(); + public native @ByVal HalfComplex toComplexHalf(); + public native 
@ByVal FloatComplex toComplexFloat(); + public native @ByVal DoubleComplex toComplexDouble(); public native @Cast("bool") boolean toBool(); public native @ByVal BFloat16 toBFloat16(); @@ -107,7 +116,7 @@ public class Scalar extends Pointer { public native @Cast("bool") boolean isFloatingPoint(); - public native @Cast("bool") @Deprecated boolean isIntegral(); + public native @Cast("bool") boolean isIntegral(@Cast("bool") boolean includeBool); public native @Cast("bool") boolean isComplex(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarArrayRef.java index b6012639160..cee1412bf5b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class ScalarArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public ScalarArrayRef(@Const @ByRef Scalar OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef Scalar OneElt); + /** Construct an ArrayRef from a pointer and length. */ public ScalarArrayRef(@Const Scalar data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -70,13 +71,13 @@ public class ScalarArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Scalar begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Scalar end(); + public native @Const @ByPtr Scalar begin(); + public native @Const @ByPtr Scalar end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Scalar cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Scalar cend(); + public native @Const @ByPtr Scalar cbegin(); + public native @Const @ByPtr Scalar cend(); /** empty - Check if the array is empty. 
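The Scalar hunk above adds complex construction and extraction (FloatComplex/DoubleComplex constructors plus toComplexFloat()/toComplexDouble()) and replaces the deprecated no-argument isIntegral() with an explicit isIntegral(boolean includeBool). A short sketch, assuming a FloatComplex(float, float) constructor from the wider presets rather than from these hunks:

    Scalar s = new Scalar(new FloatComplex(1.0f, -2.0f)); // assumed FloatComplex(re, im) constructor
    boolean complex = s.isComplex();                      // true for a complex-valued scalar
    FloatComplex z = s.toComplexFloat();                  // extraction added by this patch
    boolean integral = s.isIntegral(false);               // excludes bool, per the new overload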
*/ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarOptional.java index 85b322c34e0..ef0093eed43 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class ScalarOptional extends Pointer { public native @Name("operator =") @ByRef ScalarOptional put(@ByRef ScalarOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Scalar get(); @ValueSetter public native ScalarOptional put(@ByRef Scalar value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeArrayRef.java index 89cf48caa09..242787ca4dc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class ScalarTypeArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public ScalarTypeArrayRef(ScalarType OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(ScalarType OneElt); + /** Construct an ArrayRef from a pointer and length. */ public ScalarTypeArrayRef(@Cast("c10::ScalarType*") BytePointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -58,6 +59,8 @@ public class ScalarTypeArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. + public ScalarTypeArrayRef(@ByRef ScalarTypeVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef ScalarTypeVector vec); /** Construct an ArrayRef from a std::array */ @@ -70,13 +73,13 @@ public class ScalarTypeArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") BytePointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") BytePointer end(); + public native @Const @Cast("c10::ScalarType*") BytePointer begin(); + public native @Const @Cast("c10::ScalarType*") BytePointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. 
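The reset() method this patch adds to the generated optional wrappers (ScalarOptional above; ScalarTypeOptional and ScopeOptional in later hunks) empties the optional, mirroring c10::optional::reset(). A minimal sketch, assuming an empty-optional default constructor and the Scalar(double) implicit constructor from the wider presets:

    ScalarOptional opt = new ScalarOptional(); // assumed default constructor: empty optional
    opt.put(new Scalar(1.0));                  // @ValueSetter shown in the hunk above
    boolean before = opt.has_value();          // true
    opt.reset();                               // newly exposed; clears the contained value
    boolean after = opt.has_value();           // false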
- public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") BytePointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") BytePointer cend(); + public native @Const @Cast("c10::ScalarType*") BytePointer cbegin(); + public native @Const @Cast("c10::ScalarType*") BytePointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeEnumerationType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeEnumerationType.java index 4eacbff2a2e..1d92092dac5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeEnumerationType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeEnumerationType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeOptional.java index c90cfc863e1..b6514b31a58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class ScalarTypeOptional extends Pointer { public native @Name("operator =") @ByRef ScalarTypeOptional put(@ByRef ScalarTypeOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") ScalarType get(); @ValueSetter public native ScalarTypeOptional put(ScalarType value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeType.java index 7e314c36bf3..b637f530778 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeTypePtr.java index 5eee661d199..fa1508d91f4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 
1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeVector.java index a03440a6e93..0a494352b2f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScalarTypeVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class ScalarTypeVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public ScalarType front() { return get(0); } + public ScalarType back() { return get(size() - 1); } @Index(function = "at") public native ScalarType get(@Cast("size_t") long i); public native ScalarTypeVector put(@Cast("size_t") long i, ScalarType value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaArgument.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaArgument.java index 24e73ea460d..e260aec830b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaArgument.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaArgument.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,5 +35,5 @@ public class SchemaArgument extends Pointer { private native void allocate(SchemaArgType tpe, @Cast("size_t") long idx); public SchemaArgument(@Cast("c10::SchemaArgType") int tpe, @Cast("size_t") long idx) { super((Pointer)null); allocate(tpe, idx); } private native void allocate(@Cast("c10::SchemaArgType") int tpe, @Cast("size_t") long idx); - + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef SchemaArgument rhs); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java index e2ff3891228..497840ba78b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaInfo.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import 
org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java index f9a81f03cf1..fa207e4a8f7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SchemaRegistrationHandleRAII.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Scope.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Scope.java index dadeb4ccb20..467abf7a9e6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Scope.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Scope.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScopeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScopeOptional.java index c2114358c65..d5d5bc44098 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScopeOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScopeOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class ScopeOptional extends Pointer { public native @Name("operator =") @ByRef ScopeOptional put(@ByRef ScopeOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Scope get(); @ValueSetter public native ScopeOptional put(@ByRef Scope value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScriptModuleSerializer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScriptModuleSerializer.java deleted file mode 100644 index e6cd9efa29f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ScriptModuleSerializer.java +++ /dev/null @@ -1,38 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static 
org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// Serializer for both oldsyle and unified format TorchScript serialization -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ScriptModuleSerializer extends Pointer { - static { Loader.load(); } - - public ScriptModuleSerializer( - @Cast("caffe2::serialize::PyTorchStreamWriter*") @ByRef Pointer export_writer) { super((Pointer)null); allocate(export_writer); } - private native void allocate( - @Cast("caffe2::serialize::PyTorchStreamWriter*") @ByRef Pointer export_writer); - - public native void writeFiles(@StdString BytePointer code_dir); - public native void writeFiles(@StdString String code_dir); - public native void serialize( - @Const @ByRef JitModule module, - @Const @ByRef ExtraFilesMap extra_files, - @Cast("bool") boolean bytecode_format, - @Cast("bool") boolean save_mobile_debug_info); - public native void serialize_unified_format(@ByRef JitModule module, @Cast("uint64_t") long script_module_id); - public native @ByRef SerializationStorageContext storage_context(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ScriptTypeParser.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ScriptTypeParser.java new file mode 100644 index 00000000000..fb5dea95a80 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ScriptTypeParser.java @@ -0,0 +1,58 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * class ScriptTypeParser + * + * Parses expressions in our typed AST format (TreeView) into types and + * typenames. + */ +@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ScriptTypeParser extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ScriptTypeParser(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public ScriptTypeParser(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public ScriptTypeParser position(long position) { + return (ScriptTypeParser)super.position(position); + } + @Override public ScriptTypeParser getPointer(long i) { + return new ScriptTypeParser((Pointer)this).offsetAddress(i); + } + + public ScriptTypeParser() { super((Pointer)null); allocate(); } + private native void allocate(); + public ScriptTypeParser(@SharedPtr("torch::jit::Resolver") @ByVal Resolver resolver) { super((Pointer)null); allocate(resolver); } + private native void allocate(@SharedPtr("torch::jit::Resolver") @ByVal Resolver resolver); + + public native @ByVal Type.TypePtr parseTypeFromExpr(@Const @ByRef Expr expr); + + public native @ByVal @Cast("c10::optional >*") T_TypePtrLong_TOptional parseBroadcastList( + @Const @ByRef Expr expr); + + public native @ByVal Type.TypePtr parseType(@StdString BytePointer str); + public native @ByVal Type.TypePtr parseType(@StdString String str); + + public native @ByVal FunctionSchema parseSchemaFromDef(@Const @ByRef Def def, @Cast("bool") boolean skip_self); + + public native @ByVal IValue parseClassConstant(@Const @ByRef Assign assign); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Select.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Select.java index 041874882d3..6081e5a7072 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Select.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Select.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Select extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
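The new ScriptTypeParser bindings above map onto the TorchScript type parser: parseType() takes a type string, while parseTypeFromExpr() works on an already-parsed Expr. A minimal sketch using only the constructor and parseType(String) overload listed above; the type strings follow TorchScript annotation syntax:

    import org.bytedeco.pytorch.ScriptTypeParser;
    import org.bytedeco.pytorch.Type;

    public class ScriptTypeParserSketch {
        public static void main(String[] args) {
            ScriptTypeParser parser = new ScriptTypeParser();        // default ctor, bound above
            Type.TypePtr listType   = parser.parseType("List[int]"); // container type annotation
            Type.TypePtr tensorType = parser.parseType("Tensor");
        }
    }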
*/ + public Select(Pointer p) { super(p); } - public Select(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Select(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr value(); public native @ByVal Ident selector(); public static native @ByVal Select create( diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Self.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Self.java index 9f4e8defe58..d2bb2332d67 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Self.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Self.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -21,6 +23,6 @@ public class Self extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Self(Pointer p) { super(p); } - public native @SharedPtr @ByVal SugaredValue makeSugared(Value v); - public native @SharedPtr @ByVal ClassType getClassType(); + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue makeSugared(Value v); + public native @SharedPtr("c10::ClassType") @ByVal ClassType getClassType(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Sequential.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Sequential.java deleted file mode 100644 index 4eb8a50cfef..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Sequential.java +++ /dev/null @@ -1,48 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code SequentialImpl}. - * See the documentation for {@code SequentialImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Sequential extends SequentialImplModuleHolder { - static { Loader.load(); } - - - public Sequential() { super((Pointer)null); allocate(); } - private native void allocate(); public Sequential(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Sequential(@SharedPtr @Cast({"", "std::shared_ptr"}) SequentialImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SequentialImpl module); - /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ - public Sequential(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public Sequential(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public Sequential position(long position) { - return (Sequential)super.position(position); - } - @Override public Sequential getPointer(long i) { - return new Sequential((Pointer)this).offsetAddress(i); - } - - - /** Constructs the {@code Sequential} from a braced-init-list of named {@code AnyModule}s. - * It enables the following use case: - * {@code Sequential sequential({{"m1", M(1)}, {"m2", M(2)}})} */ -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java index c84e9e5fe50..0507f0aca0b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -101,15 +103,15 @@ public class SequentialImpl extends SequentialImplCloneable { public SequentialImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); /** Constructs the {@code Sequential} from a variadic list of modules. */ /** Constructs the {@code Sequential} from an {@code OrderedDict} of named {@code AnyModule}s. */ public SequentialImpl( - @Cast({"", "torch::OrderedDict&&"}) @StdMove StringAnyModuleDict ordered_dict) { super((Pointer)null); allocate(ordered_dict); } - @NoDeallocator private native void allocate( - @Cast({"", "torch::OrderedDict&&"}) @StdMove StringAnyModuleDict ordered_dict); + @ByRef(true) StringAnyModuleDict ordered_dict) { super((Pointer)null); allocate(ordered_dict); } + @SharedPtr private native void allocate( + @ByRef(true) StringAnyModuleDict ordered_dict); /** Constructs the {@code Sequential} from a braced-init-list of named {@code AnyModule}s. * It enables the following use case: @@ -117,9 +119,9 @@ public SequentialImpl( /** Special cloning function for {@code Sequential} because it does not use * {@code reset()}. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); /** {@code reset()} is empty for {@code Sequential}, since it does not have parameters of * its own. 
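With @NoDeallocator replaced by @SharedPtr on allocate(), a SequentialImpl constructed from Java is owned by a shared_ptr from the start, and clone() now returns a @SharedPtr("torch::nn::Module") Module by value. A sketch of constructing and cloning a container, assuming the LinearImpl(long, long) and ReLUImpl() constructors from elsewhere in these presets (the push_back overloads used here are listed further below):

    import org.bytedeco.pytorch.*;

    public class SequentialCloneSketch {
        public static void main(String[] args) {
            SequentialImpl seq = new SequentialImpl(); // allocated under shared_ptr ownership
            seq.push_back(new LinearImpl(4, 8));
            seq.push_back(new ReLUImpl());
            Module copy = seq.clone();                 // deep copy, returned as a shared Module
        }
    }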
*/ @@ -164,10 +166,396 @@ public SequentialImpl( * float value = sequential3->forward(inputs); * * \endrst */ + public native @ByVal Tensor forward(@Const @ByRef Tensor input); + public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2); + public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3); + public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4); + public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6); + public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6, @Const @ByRef Tensor input7, @Const @ByRef Tensor input8); + public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByRef(nullValue = "c10::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); + public native @ByVal Tensor forward(@Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size); + public native @ByVal Tensor forward(@Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Const @ByRef(nullValue = "c10::optional >(c10::nullopt)") LongVectorOptional output_size); + public native @ByVal @Name("forward>>") T_TensorT_TensorTensor_T_T forwardT_TensorT_TensorTensor_T_T(@Const @ByRef Tensor input); + public native @ByVal @Name("forward>>") T_TensorT_TensorTensor_T_T forwardT_TensorT_TensorTensor_T_T(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::optional >{}") T_TensorTensor_TOptional hx_opt); + public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input); + public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2); + public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3); + public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::optional >{}") T_TensorTensor_TOptional hx_opt); + public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "torch::Tensor{}") Tensor key_padding_mask, @Cast("bool") boolean need_weights/*=true*/, @Const @ByRef(nullValue = "torch::Tensor{}") Tensor attn_mask, @Cast("bool") boolean average_attn_weights/*=true*/); + public native @ByVal @Name("forward") ASMoutput forwardASMoutput(@Const @ByRef Tensor input, @Const @ByRef Tensor target); /** Adds a new (boxed) {@code Module} to the {@code Sequential} container. 
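The forward overloads above dispatch on the requested return type: plain Tensor chains, tuple-returning chains via the T_TensorTensor_T helpers, and the AdaptiveLogSoftmaxWithLoss case via ASMoutput. A minimal sketch of the common Tensor-in/Tensor-out case, assuming a randn(long...) factory in org.bytedeco.pytorch.global.torch and the TanhImpl() constructor (neither appears in this hunk):

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.randn;

    public class SequentialForwardSketch {
        public static void main(String[] args) {
            SequentialImpl seq = new SequentialImpl();
            seq.push_back(new LinearImpl(16, 32));
            seq.push_back(new TanhImpl());
            Tensor x = randn(1, 16);   // assumed factory: a 1 x 16 input batch
            Tensor y = seq.forward(x); // Tensor overload shown above; y has shape 1 x 32
        }
    }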
*/ + public native @Name("push_back") void push_back(@SharedPtr AdaptiveLogSoftmaxWithLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr BatchNorm1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr InstanceNorm1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr Conv1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ConvTranspose1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr DropoutImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr BatchNorm2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr InstanceNorm2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr Conv2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ConvTranspose2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr Dropout2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr BatchNorm3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr InstanceNorm3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr Conv3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ConvTranspose3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr Dropout3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr AlphaDropoutImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr FeatureAlphaDropoutImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr CosineSimilarityImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr PairwiseDistanceImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr EmbeddingImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr EmbeddingBagImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr FoldImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr UnfoldImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr IdentityImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr LinearImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr BilinearImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr FlattenImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr UnflattenImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr L1LossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr KLDivLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MSELossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr BCELossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr HingeEmbeddingLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MultiMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr CosineEmbeddingLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr SmoothL1LossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr HuberLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MultiLabelMarginLossImpl module_ptr); + public native 
@Name("push_back") void push_back(@SharedPtr SoftMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MultiLabelSoftMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr TripletMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr TripletMarginWithDistanceLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr CTCLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr PoissonNLLLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MarginRankingLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr NLLLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr CrossEntropyLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr BCEWithLogitsLossImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ReflectionPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ReplicationPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ConstantPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr AvgPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MaxPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr AdaptiveAvgPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr AdaptiveMaxPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MaxUnpool1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr LPPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ReflectionPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ReplicationPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ConstantPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ZeroPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr AvgPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MaxPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr AdaptiveAvgPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr AdaptiveMaxPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MaxUnpool2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr FractionalMaxPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr LPPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ReflectionPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ReplicationPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ConstantPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr AvgPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MaxPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr AdaptiveAvgPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr AdaptiveMaxPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MaxUnpool3dImpl module_ptr); + public native @Name("push_back") void 
push_back(@SharedPtr FractionalMaxPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr RNNImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr LSTMImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr GRUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr RNNCellImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr LSTMCellImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr GRUCellImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr PixelShuffleImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr PixelUnshuffleImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr UpsampleImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ELUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr SELUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr HardshrinkImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr HardtanhImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr LeakyReLUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr LogSigmoidImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr SoftmaxImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr SoftminImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr LogSoftmaxImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr Softmax2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr PReLUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ReLUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ReLU6Impl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr RReLUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr CELUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr GLUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr GELUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr SiLUImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MishImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr SigmoidImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr SoftplusImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr SoftshrinkImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr SoftsignImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr TanhImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr TanhshrinkImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr ThresholdImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr MultiheadAttentionImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr LayerNormImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr LocalResponseNormImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr CrossMapLRN2dImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr GroupNormImpl module_ptr); + public native 
@Name("push_back") void push_back(@SharedPtr TransformerEncoderLayerImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr TransformerDecoderLayerImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr TransformerEncoderImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr TransformerDecoderImpl module_ptr); + public native @Name("push_back") void push_back(@SharedPtr TransformerImpl module_ptr); /** Adds a new named (boxed) {@code Module} to the {@code Sequential} container. */ + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AdaptiveLogSoftmaxWithLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AdaptiveLogSoftmaxWithLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr BatchNorm1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr BatchNorm1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr InstanceNorm1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr InstanceNorm1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr Conv1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr Conv1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ConvTranspose1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ConvTranspose1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr DropoutImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr DropoutImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr BatchNorm2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr BatchNorm2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr InstanceNorm2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr InstanceNorm2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr Conv2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr Conv2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ConvTranspose2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ConvTranspose2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr Dropout2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr Dropout2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr BatchNorm3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr BatchNorm3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr InstanceNorm3dImpl module_ptr); + public native @Name("push_back") void 
push_back(@StdString String name, @SharedPtr InstanceNorm3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr Conv3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr Conv3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ConvTranspose3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ConvTranspose3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr Dropout3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr Dropout3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AlphaDropoutImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AlphaDropoutImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr FeatureAlphaDropoutImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr FeatureAlphaDropoutImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr CosineSimilarityImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr CosineSimilarityImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr PairwiseDistanceImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr PairwiseDistanceImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr EmbeddingImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr EmbeddingImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr EmbeddingBagImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr EmbeddingBagImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr FoldImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr FoldImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr UnfoldImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr UnfoldImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr IdentityImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr IdentityImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr LinearImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr LinearImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr BilinearImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr BilinearImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr FlattenImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr FlattenImpl module_ptr); + 
public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr UnflattenImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr UnflattenImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr L1LossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr L1LossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr KLDivLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr KLDivLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MSELossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MSELossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr BCELossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr BCELossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr HingeEmbeddingLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr HingeEmbeddingLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MultiMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MultiMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr CosineEmbeddingLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr CosineEmbeddingLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr SmoothL1LossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr SmoothL1LossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr HuberLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr HuberLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MultiLabelMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MultiLabelMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr SoftMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr SoftMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MultiLabelSoftMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MultiLabelSoftMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr TripletMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr TripletMarginLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr TripletMarginWithDistanceLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr 
TripletMarginWithDistanceLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr CTCLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr CTCLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr PoissonNLLLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr PoissonNLLLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MarginRankingLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MarginRankingLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr NLLLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr NLLLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr CrossEntropyLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr CrossEntropyLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr BCEWithLogitsLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr BCEWithLogitsLossImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ReflectionPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ReflectionPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ReplicationPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ReplicationPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ConstantPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ConstantPad1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AvgPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AvgPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MaxPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MaxPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AdaptiveAvgPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AdaptiveAvgPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AdaptiveMaxPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AdaptiveMaxPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MaxUnpool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MaxUnpool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr LPPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String 
name, @SharedPtr LPPool1dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ReflectionPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ReflectionPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ReplicationPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ReplicationPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ConstantPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ConstantPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ZeroPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ZeroPad2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AvgPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AvgPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MaxPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MaxPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AdaptiveAvgPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AdaptiveAvgPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AdaptiveMaxPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AdaptiveMaxPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MaxUnpool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MaxUnpool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr FractionalMaxPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr FractionalMaxPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr LPPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr LPPool2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ReflectionPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ReflectionPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ReplicationPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ReplicationPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ConstantPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ConstantPad3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AvgPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString 
String name, @SharedPtr AvgPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MaxPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MaxPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AdaptiveAvgPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AdaptiveAvgPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr AdaptiveMaxPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr AdaptiveMaxPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MaxUnpool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MaxUnpool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr FractionalMaxPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr FractionalMaxPool3dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr RNNImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr RNNImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr LSTMImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr LSTMImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr GRUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr GRUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr RNNCellImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr RNNCellImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr LSTMCellImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr LSTMCellImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr GRUCellImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr GRUCellImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr PixelShuffleImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr PixelShuffleImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr PixelUnshuffleImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr PixelUnshuffleImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr UpsampleImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr UpsampleImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ELUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ELUImpl module_ptr); + public native @Name("push_back") void 
push_back(@StdString BytePointer name, @SharedPtr SELUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr SELUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr HardshrinkImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr HardshrinkImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr HardtanhImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr HardtanhImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr LeakyReLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr LeakyReLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr LogSigmoidImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr LogSigmoidImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr SoftmaxImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr SoftmaxImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr SoftminImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr SoftminImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr LogSoftmaxImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr LogSoftmaxImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr Softmax2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr Softmax2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr PReLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr PReLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ReLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ReLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ReLU6Impl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ReLU6Impl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr RReLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr RReLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr CELUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr CELUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr GLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr GLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr GELUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr GELUImpl module_ptr); + public 
native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr SiLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr SiLUImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MishImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MishImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr SigmoidImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr SigmoidImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr SoftplusImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr SoftplusImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr SoftshrinkImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr SoftshrinkImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr SoftsignImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr SoftsignImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr TanhImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr TanhImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr TanhshrinkImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr TanhshrinkImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr ThresholdImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr ThresholdImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr MultiheadAttentionImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr MultiheadAttentionImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr LayerNormImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr LayerNormImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr LocalResponseNormImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr LocalResponseNormImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr CrossMapLRN2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr CrossMapLRN2dImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr GroupNormImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr GroupNormImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr TransformerEncoderLayerImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr TransformerEncoderLayerImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr 
TransformerDecoderLayerImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr TransformerDecoderLayerImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr TransformerEncoderImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr TransformerEncoderImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr TransformerDecoderImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr TransformerDecoderImpl module_ptr); + public native @Name("push_back") void push_back(@StdString BytePointer name, @SharedPtr TransformerImpl module_ptr); + public native @Name("push_back") void push_back(@StdString String name, @SharedPtr TransformerImpl module_ptr); /** Adds a new {@code Module} to the {@code Sequential} container, moving or copying it * into a {@code shared_ptr} internally. This method allows passing value types, @@ -214,14 +602,14 @@ public SequentialImpl( /** Attempts to return a {@code std::shared_ptr} whose dynamic type is that of the * underlying module at the given index. Throws an exception if the index is * out of bounds. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module ptr(@Cast("size_t") long index); + public native @SharedPtr("torch::nn::Module") @ByVal Module ptr(@Cast("size_t") long index); /** Attempts to return a {@code std::shared_ptr} whose type is the one provided. * Throws an exception if the index is out of bounds or the types do not * match. */ /** Like {@code ptr(index)}. */ - public native @SharedPtr @Name("operator []") @Cast({"", "std::shared_ptr"}) Module get(@Cast("size_t") long index); + public native @SharedPtr("torch::nn::Module") @ByVal @Name("operator []") Module get(@Cast("size_t") long index); /** The current size of the {@code Sequential} container. */ public native @Cast("size_t") @NoException(true) long size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java index 5670ba18b7b..4565f511da8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SequentialImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SequentialImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SequentialImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. 
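[Editor's note] Taken together, these generated overloads give the Java side the same named-submodule workflow as C++ torch::nn::Sequential. A minimal sketch of how they might be used, assuming the no-arg constructors the presets generate for SequentialImpl and for option-free activation modules such as ReLUImpl and TanhImpl:

    SequentialImpl seq = new SequentialImpl();
    seq.push_back("act1", new ReLUImpl());  // resolves to the ReLUImpl overload listed above
    seq.push_back("act2", new TanhImpl());
    Module first = seq.ptr(0);              // shared_ptr to the underlying module, see ptr(index) above
    long count = seq.size();                // 2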
*/ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SequentialImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplModuleHolder.java deleted file mode 100644 index 1d721ff056e..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplModuleHolder.java +++ /dev/null @@ -1,89 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SequentialImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SequentialImplModuleHolder(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SequentialImplModuleHolder(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SequentialImplModuleHolder position(long position) { - return (SequentialImplModuleHolder)super.position(position); - } - @Override public SequentialImplModuleHolder getPointer(long i) { - return new SequentialImplModuleHolder((Pointer)this).offsetAddress(i); - } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - public SequentialImplModuleHolder() { super((Pointer)null); allocate(); } - private native void allocate(); - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public SequentialImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public SequentialImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SequentialImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SequentialImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SequentialImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") SequentialImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) SequentialImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native SequentialImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java index cf019c9438a..a250bc93c9d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SerializationStorageContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SerializationStorageContext.java index e24929bc79b..7f3bd8df017 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SerializationStorageContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SerializationStorageContext.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbol.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbol.java index fbc838711c0..23c3a669abc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbol.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbol.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,7 +38,7 @@ public class ShapeSymbol extends Pointer { private native void allocate(); // is this symbol a fixed/static dimension public native @Cast("bool") boolean is_static(); - + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef ShapeSymbol b); public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef ShapeSymbol b); public static native @ByVal ShapeSymbol fromStaticSize(@Cast("int64_t") long val); @@ -45,5 +47,8 @@ public class ShapeSymbol extends Pointer { public native @Cast("int64_t") long value(); public static native @ByVal ShapeSymbol newSymbol(); - + private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer os, + @Const @ByRef ShapeSymbol s); + public Pointer shiftLeft(Pointer os) { return shiftLeft(os, this); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVector.java index 544aba75a20..a38b68ba5e4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class ShapeSymbolVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public ShapeSymbol front() { return get(0); } + public ShapeSymbol back() { return get(size() - 1); } @Index(function = "at") public native @ByRef ShapeSymbol get(@Cast("size_t") long i); public native ShapeSymbolVector put(@Cast("size_t") long i, ShapeSymbol value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVectorOptional.java index f1e650a086e..4cd62a7cf0e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ShapeSymbolVectorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class ShapeSymbolVectorOptional extends Pointer { public native @Name("operator =") @ByRef ShapeSymbolVectorOptional put(@ByRef ShapeSymbolVectorOptional x); public native boolean has_value(); + public native void reset(); public native 
@Name("value") @ByRef ShapeSymbolVector get(); @ValueSetter public native ShapeSymbolVectorOptional put(@ByRef ShapeSymbolVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedAnyModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedAnyModuleVector.java index 3f8a4a57456..63db0877207 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedAnyModuleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedAnyModuleVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,8 +22,8 @@ public class SharedAnyModuleVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SharedAnyModuleVector(Pointer p) { super(p); } - public SharedAnyModuleVector(@Cast({"", "std::shared_ptr"}) AnyModule value) { this(1); put(0, value); } - public SharedAnyModuleVector(@Cast({"", "std::shared_ptr"}) AnyModule ... array) { this(array.length); put(array); } + public SharedAnyModuleVector(AnyModule value) { this(1); put(0, value); } + public SharedAnyModuleVector(AnyModule ... array) { this(array.length); put(array); } public SharedAnyModuleVector() { allocate(); } public SharedAnyModuleVector(long n) { allocate(n); } private native void allocate(); @@ -33,10 +35,12 @@ public class SharedAnyModuleVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @SharedPtr @Cast({"", "std::shared_ptr"}) AnyModule get(@Cast("size_t") long i); + public AnyModule front() { return get(0); } + public AnyModule back() { return get(size() - 1); } + @Index(function = "at") public native @SharedPtr("torch::nn::AnyModule") AnyModule get(@Cast("size_t") long i); public native SharedAnyModuleVector put(@Cast("size_t") long i, AnyModule value); - public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr @Cast({"", "std::shared_ptr"}) AnyModule value); + public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("torch::nn::AnyModule") AnyModule value); public native @ByVal Iterator erase(@ByVal Iterator pos); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @@ -46,7 +50,7 @@ public Iterator() { } public native @Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @SharedPtr @Cast({"", "std::shared_ptr"}) AnyModule get(); + public native @Name("operator *") @SharedPtr("torch::nn::AnyModule") @Const AnyModule get(); } public AnyModule[] get() { diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedClassTypeVector.java similarity index 64% rename from pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypeVector.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/SharedClassTypeVector.java index 77aea73f965..015d86fbe80 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypeVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedClassTypeVector.java @@ -1,10 
+1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,27 +18,29 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ClassTypeVector extends Pointer { +public class SharedClassTypeVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ClassTypeVector(Pointer p) { super(p); } - public ClassTypeVector(ClassType value) { this(1); put(0, value); } - public ClassTypeVector(ClassType ... array) { this(array.length); put(array); } - public ClassTypeVector() { allocate(); } - public ClassTypeVector(long n) { allocate(n); } + public SharedClassTypeVector(Pointer p) { super(p); } + public SharedClassTypeVector(ClassType value) { this(1); put(0, value); } + public SharedClassTypeVector(ClassType ... array) { this(array.length); put(array); } + public SharedClassTypeVector() { allocate(); } + public SharedClassTypeVector(long n) { allocate(n); } private native void allocate(); private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef ClassTypeVector put(@ByRef ClassTypeVector x); + public native @Name("operator =") @ByRef SharedClassTypeVector put(@ByRef SharedClassTypeVector x); public boolean empty() { return size() == 0; } public native long size(); public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @SharedPtr ClassType get(@Cast("size_t") long i); - public native ClassTypeVector put(@Cast("size_t") long i, ClassType value); + public ClassType front() { return get(0); } + public ClassType back() { return get(size() - 1); } + @Index(function = "at") public native @SharedPtr("c10::ClassType") ClassType get(@Cast("size_t") long i); + public native SharedClassTypeVector put(@Cast("size_t") long i, ClassType value); - public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr ClassType value); + public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("c10::ClassType") ClassType value); public native @ByVal Iterator erase(@ByVal Iterator pos); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @@ -46,7 +50,7 @@ public Iterator() { } public native @Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @SharedPtr @Const ClassType get(); + public native @Name("operator *") @SharedPtr("c10::ClassType") @Const ClassType get(); } public ClassType[] get() { @@ -66,16 +70,16 @@ public ClassType pop_back() { resize(size - 1); return value; } - public ClassTypeVector push_back(ClassType value) { + public SharedClassTypeVector push_back(ClassType value) { long size = size(); resize(size + 1); return put(size, value); } - public ClassTypeVector put(ClassType value) { + public SharedClassTypeVector put(ClassType value) { if (size() != 1) { resize(1); } return put(0, value); } - public ClassTypeVector put(ClassType ... 
array) { + public SharedClassTypeVector put(ClassType ... array) { if (size() != array.length) { resize(array.length); } for (int i = 0; i < array.length; i++) { put(i, array[i]); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedFunctionPreVector.java similarity index 62% rename from pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreVector.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/SharedFunctionPreVector.java index f579b3feedc..03371d340f7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedFunctionPreVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,27 +18,29 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class FunctionPreVector extends Pointer { +public class SharedFunctionPreVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public FunctionPreVector(Pointer p) { super(p); } - public FunctionPreVector(FunctionPreHook value) { this(1); put(0, value); } - public FunctionPreVector(FunctionPreHook ... array) { this(array.length); put(array); } - public FunctionPreVector() { allocate(); } - public FunctionPreVector(long n) { allocate(n); } + public SharedFunctionPreVector(Pointer p) { super(p); } + public SharedFunctionPreVector(FunctionPreHook value) { this(1); put(0, value); } + public SharedFunctionPreVector(FunctionPreHook ... 
array) { this(array.length); put(array); } + public SharedFunctionPreVector() { allocate(); } + public SharedFunctionPreVector(long n) { allocate(n); } private native void allocate(); private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef FunctionPreVector put(@ByRef FunctionPreVector x); + public native @Name("operator =") @ByRef SharedFunctionPreVector put(@ByRef SharedFunctionPreVector x); public boolean empty() { return size() == 0; } public native long size(); public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @SharedPtr FunctionPreHook get(@Cast("size_t") long i); - public native FunctionPreVector put(@Cast("size_t") long i, FunctionPreHook value); + public FunctionPreHook front() { return get(0); } + public FunctionPreHook back() { return get(size() - 1); } + @Index(function = "at") public native @SharedPtr("torch::autograd::FunctionPreHook") FunctionPreHook get(@Cast("size_t") long i); + public native SharedFunctionPreVector put(@Cast("size_t") long i, FunctionPreHook value); - public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr FunctionPreHook value); + public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("torch::autograd::FunctionPreHook") FunctionPreHook value); public native @ByVal Iterator erase(@ByVal Iterator pos); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @@ -46,7 +50,7 @@ public Iterator() { } public native @Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @SharedPtr FunctionPreHook get(); + public native @Name("operator *") @SharedPtr("torch::autograd::FunctionPreHook") @Const FunctionPreHook get(); } public FunctionPreHook[] get() { @@ -66,16 +70,16 @@ public FunctionPreHook pop_back() { resize(size - 1); return value; } - public FunctionPreVector push_back(FunctionPreHook value) { + public SharedFunctionPreVector push_back(FunctionPreHook value) { long size = size(); resize(size + 1); return put(size, value); } - public FunctionPreVector put(FunctionPreHook value) { + public SharedFunctionPreVector put(FunctionPreHook value) { if (size() != 1) { resize(1); } return put(0, value); } - public FunctionPreVector put(FunctionPreHook ... array) { + public SharedFunctionPreVector put(FunctionPreHook ... array) { if (size() != array.length) { resize(array.length); } for (int i = 0; i < array.length; i++) { put(i, array[i]); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedModuleVector.java index b5a6ac14cfa..d3952f60e1c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedModuleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedModuleVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,8 +22,8 @@ public class SharedModuleVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
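[Editor's note] All of the renamed Shared*Vector wrappers in this diff (SharedClassTypeVector and SharedFunctionPreVector above, SharedModuleVector just below) follow the same generated pattern, now with front()/back() convenience accessors. A minimal sketch against SharedClassTypeVector, where classType stands in for an existing ClassType instance (hypothetical):

    SharedClassTypeVector types = new SharedClassTypeVector();
    types.push_back(classType);        // resize-and-put, as in the generated push_back above
    ClassType first = types.front();   // new convenience accessor, equivalent to get(0)
    ClassType last  = types.back();    // equivalent to get(size() - 1)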
*/ public SharedModuleVector(Pointer p) { super(p); } - public SharedModuleVector(@Cast({"", "std::shared_ptr"}) Module value) { this(1); put(0, value); } - public SharedModuleVector(@Cast({"", "std::shared_ptr"}) Module ... array) { this(array.length); put(array); } + public SharedModuleVector(Module value) { this(1); put(0, value); } + public SharedModuleVector(Module ... array) { this(array.length); put(array); } public SharedModuleVector() { allocate(); } public SharedModuleVector(long n) { allocate(n); } private native void allocate(); @@ -33,10 +35,12 @@ public class SharedModuleVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @SharedPtr @Cast({"", "std::shared_ptr"}) Module get(@Cast("size_t") long i); + public Module front() { return get(0); } + public Module back() { return get(size() - 1); } + @Index(function = "at") public native @SharedPtr("torch::nn::Module") Module get(@Cast("size_t") long i); public native SharedModuleVector put(@Cast("size_t") long i, Module value); - public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr @Cast({"", "std::shared_ptr"}) Module value); + public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("torch::nn::Module") Module value); public native @ByVal Iterator erase(@ByVal Iterator pos); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @@ -46,7 +50,7 @@ public Iterator() { } public native @Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @SharedPtr @Cast({"", "std::shared_ptr"}) Module get(); + public native @Name("operator *") @SharedPtr("torch::nn::Module") @Const Module get(); } public Module[] get() { diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedParserData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedParserData.java index 98a2688afee..33cf2cd0dd9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedParserData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedParserData.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValueVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedSugaredValueVector.java similarity index 63% rename from pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValueVector.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/SharedSugaredValueVector.java index 6edc4239d8d..22d5d5eafd3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValueVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedSugaredValueVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; 
import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,27 +18,29 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SugaredValueVector extends Pointer { +public class SharedSugaredValueVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SugaredValueVector(Pointer p) { super(p); } - public SugaredValueVector(SugaredValue value) { this(1); put(0, value); } - public SugaredValueVector(SugaredValue ... array) { this(array.length); put(array); } - public SugaredValueVector() { allocate(); } - public SugaredValueVector(long n) { allocate(n); } + public SharedSugaredValueVector(Pointer p) { super(p); } + public SharedSugaredValueVector(SugaredValue value) { this(1); put(0, value); } + public SharedSugaredValueVector(SugaredValue ... array) { this(array.length); put(array); } + public SharedSugaredValueVector() { allocate(); } + public SharedSugaredValueVector(long n) { allocate(n); } private native void allocate(); private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef SugaredValueVector put(@ByRef SugaredValueVector x); + public native @Name("operator =") @ByRef SharedSugaredValueVector put(@ByRef SharedSugaredValueVector x); public boolean empty() { return size() == 0; } public native long size(); public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @SharedPtr SugaredValue get(@Cast("size_t") long i); - public native SugaredValueVector put(@Cast("size_t") long i, SugaredValue value); + public SugaredValue front() { return get(0); } + public SugaredValue back() { return get(size() - 1); } + @Index(function = "at") public native @SharedPtr("torch::jit::SugaredValue") SugaredValue get(@Cast("size_t") long i); + public native SharedSugaredValueVector put(@Cast("size_t") long i, SugaredValue value); - public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr SugaredValue value); + public native @ByVal Iterator insert(@ByVal Iterator pos, @SharedPtr("torch::jit::SugaredValue") SugaredValue value); public native @ByVal Iterator erase(@ByVal Iterator pos); public native @ByVal Iterator begin(); public native @ByVal Iterator end(); @@ -46,7 +50,7 @@ public Iterator() { } public native @Name("operator ++") @ByRef Iterator increment(); public native @Name("operator ==") boolean equals(@ByRef Iterator it); - public native @Name("operator *") @SharedPtr @Const SugaredValue get(); + public native @Name("operator *") @SharedPtr("torch::jit::SugaredValue") @Const SugaredValue get(); } public SugaredValue[] get() { @@ -66,16 +70,16 @@ public SugaredValue pop_back() { resize(size - 1); return value; } - public SugaredValueVector push_back(SugaredValue value) { + public SharedSugaredValueVector push_back(SugaredValue value) { long size = size(); resize(size + 1); return put(size, value); } - public SugaredValueVector put(SugaredValue value) { + public SharedSugaredValueVector put(SugaredValue value) { if (size() != 1) { resize(1); } return put(0, value); } - public SugaredValueVector put(SugaredValue ... array) { + public SharedSugaredValueVector put(SugaredValue ... 
array) { if (size() != array.length) { resize(array.length); } for (int i = 0; i < array.length; i++) { put(i, array[i]); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedType.java index 3dbf42e2504..be2b7bde0bf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SharedType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SharedType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java index 374c69e17a3..7b4761e342f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ShortArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class ShortArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public ShortArrayRef(short OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(short OneElt); + /** Construct an ArrayRef from a pointer and length. */ public ShortArrayRef(@Const ShortPointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -78,13 +79,13 @@ public class ShortArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") ShortPointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") ShortPointer end(); + public native @Const ShortPointer begin(); + public native @Const ShortPointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") ShortPointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") ShortPointer cend(); + public native @Const ShortPointer cbegin(); + public native @Const ShortPointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); @@ -101,13 +102,13 @@ public class ShortArrayRef extends Pointer { public native short back(); /** equals - Check for element-wise equality. 
*/ - public native @Cast("const bool") boolean equals(@ByVal @Cast("c10::ArrayRef*") ShortArrayRef RHS); + public native @Cast("const bool") boolean equals(@ByVal ShortArrayRef RHS); /** slice(n, m) - Take M elements of the array starting at element N */ - public native @ByVal @Cast("const c10::ArrayRef*") ShortArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + public native @Const @ByVal ShortArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array. */ - public native @ByVal @Cast("const c10::ArrayRef*") ShortArrayRef slice(@Cast("size_t") long N); + public native @Const @ByVal ShortArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLU.java deleted file mode 100644 index ffcd635e28b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLU.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code SiLUImpl}. - * See the documentation for {@code SiLUImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SiLU extends SiLUImplModuleHolder { - static { Loader.load(); } - - public SiLU(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public SiLU(@SharedPtr @Cast({"", "std::shared_ptr"}) SiLUImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SiLUImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
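[Editor's note] The ShortArrayRef cleanup above (plain ShortPointer iterators, un-cast equals/slice signatures) leaves a class that is straightforward to drive from Java. A minimal sketch, assuming a small native buffer allocated via JavaCPP's ShortPointer:

    ShortPointer data = new ShortPointer(4);         // 4-element native buffer (contents uninitialized)
    ShortArrayRef ref = new ShortArrayRef(data, 4);  // pointer + length constructor kept by this diff
    ShortArrayRef tail = ref.slice(1);               // chop off the first element
    short lastValue = ref.back();                    // front()/back()/empty()/size() as declared above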
*/ - public SiLU(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java index 33843b371dc..1e2941da8e0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java index 78d594c1245..8bc485fb410 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SiLUImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SiLUImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SiLUImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SiLUImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
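[Editor's note] The asModule() bridge added here replaces the old static_cast helper with a proper shared_ptr cast, so a typed module can be handed to any API expecting a plain Module while keeping shared ownership intact. A minimal sketch, assuming the generated no-arg constructor for the option-free SiLUImpl:

    SiLUImpl act = new SiLUImpl();
    Module base = act.asModule();  // shared_ptr static_pointer_cast under the hood, per the @Name above
    Module copy = act.clone();     // recursive deep copy, returned as a shared_ptr-backed Module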
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplModuleHolder.java deleted file mode 100644 index 107d8baed46..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SiLUImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SiLUImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public SiLUImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public SiLUImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SiLUImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SiLUImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SiLUImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") SiLUImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) SiLUImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native SiLUImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Sigmoid.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Sigmoid.java deleted file mode 100644 index daac6428b77..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Sigmoid.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code SigmoidImpl}. - * See the documentation for {@code SigmoidImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Sigmoid extends SigmoidImplModuleHolder { - static { Loader.load(); } - - public Sigmoid(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Sigmoid(@SharedPtr @Cast({"", "std::shared_ptr"}) SigmoidImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SigmoidImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Sigmoid(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java index e5a58ddde00..09256d58399 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java index e11fe12e10b..c12b7db95ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SigmoidImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SigmoidImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SigmoidImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SigmoidImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
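[Editor's note] With the holder classes (SiLU, Sigmoid, and the *ImplModuleHolder wrappers) deleted throughout this PR, the Impl classes become the entry point on the Java side. A hedged sketch of the replacement pattern; the forward(Tensor) signature is assumed from the generated activation modules, and x stands in for a hypothetical input tensor:

    SigmoidImpl sigmoid = new SigmoidImpl();  // construct the module directly; no holder wrapper
    Tensor y = sigmoid.forward(x);            // assumed generated forward(Tensor) overload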
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplModuleHolder.java deleted file mode 100644 index 9ea5cb253b7..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SigmoidImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SigmoidImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public SigmoidImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public SigmoidImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SigmoidImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SigmoidImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SigmoidImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") SigmoidImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) SigmoidImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native SigmoidImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleSelf.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleSelf.java index 5e2fd71335b..1ae3a54b0d4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleSelf.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleSelf.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,8 +24,8 @@ public class SimpleSelf extends Self { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SimpleSelf(Pointer p) { super(p); } - public SimpleSelf(@SharedPtr @ByVal ClassType classType) { super((Pointer)null); allocate(classType); } - private native void allocate(@SharedPtr @ByVal ClassType classType); - public native @SharedPtr @ByVal SugaredValue makeSugared(Value v); - public native @SharedPtr @ByVal ClassType getClassType(); + public SimpleSelf(@SharedPtr("c10::ClassType") @ByVal ClassType classType) { super((Pointer)null); allocate(classType); } + private native void allocate(@SharedPtr("c10::ClassType") @ByVal ClassType classType); + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue makeSugared(Value v); + public native @SharedPtr("c10::ClassType") @ByVal ClassType getClassType(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleValue.java index c876703951f..45ab308e955 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SimpleValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -28,18 +30,18 @@ public class SimpleValue extends SugaredValue { private native void allocate(Value value); public native @StdString BytePointer kind(); public native Value asValue(@Const @ByRef SourceRange range, @ByRef GraphFunction m); - public native @ByVal SugaredValueVector asTuple( + public native @ByVal SharedSugaredValueVector asTuple( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @Const @ByRef(nullValue = "c10::optional{}") SizeTOptional size_hint); - public native @ByVal SugaredValueVector asTuple( + public native @ByVal SharedSugaredValueVector asTuple( @Const @ByRef SourceRange loc, @ByRef GraphFunction m); - 
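The SimpleSelf and SimpleValue hunks above show two changes this refactoring applies across the presets: @SharedPtr annotations now carry the C++ element type explicitly (for example @SharedPtr("torch::jit::SugaredValue") @ByVal), so the generated JNI code instantiates std::shared_ptr<T> for the correct T even where the Java class name differs from the C++ one, and the shared-pointer container is renamed from SugaredValueVector to SharedSugaredValueVector. A minimal sketch of how such a mapping can be declared in a JavaCPP presets InfoMap follows; it is an illustration using the standard Info API, not an excerpt from the actual torch presets:

    import org.bytedeco.javacpp.tools.Info;
    import org.bytedeco.javacpp.tools.InfoMap;

    // Hypothetical mapping: give @SharedPtr an explicit element type so the
    // generated wrappers construct std::shared_ptr<torch::jit::SugaredValue>.
    public class SharedPtrMappingSketch {
        static void map(InfoMap infoMap) {
            infoMap.put(new Info("std::shared_ptr<torch::jit::SugaredValue>")
                    .annotations("@SharedPtr(\"torch::jit::SugaredValue\")")
                    .valueTypes("SugaredValue"));
        }
    }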
public native @SharedPtr @ByVal SugaredValue attr( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue attr( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @StdString BytePointer field); - public native @SharedPtr @ByVal SugaredValue attr( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue attr( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @StdString String field); @@ -64,24 +66,19 @@ public native void setAttr( @StdString String field, Value newValue); - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + - public native @SharedPtr @ByVal SugaredValue iter(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue iter(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); public native Value getValue(); public native Value len(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); - public native @SharedPtr @ByVal SugaredValue getitem( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue getitem( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, Value idx, @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); - public native @SharedPtr @ByVal SugaredValue getitem( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue getitem( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, Value idx); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SingletonTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SingletonTypePtr.java index d0ca0c770a2..a4cea643365 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SingletonTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SingletonTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java index 83ee49828b9..c4ca3e0ed65 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,6 +22,15 @@ public class SizeTArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SizeTArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public SizeTArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public SizeTArrayRef position(long position) { + return (SizeTArrayRef)super.position(position); + } + @Override public SizeTArrayRef getPointer(long i) { + return new SizeTArrayRef((Pointer)this).offsetAddress(i); + } /** \name Constructors * \{ @@ -30,8 +41,7 @@ public class SizeTArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public SizeTArrayRef(@Cast("const size_t") long OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Cast("const size_t") long OneElt); + /** Construct an ArrayRef from a pointer and length. */ public SizeTArrayRef(@Cast("const size_t*") SizeTPointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -49,6 +59,8 @@ public class SizeTArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. + public SizeTArrayRef(@ByRef SizeTVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef SizeTVector vec); /** Construct an ArrayRef from a std::array */ @@ -61,13 +73,13 @@ public class SizeTArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") SizeTPointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") SizeTPointer end(); + public native @Const SizeTPointer begin(); + public native @Const SizeTPointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") SizeTPointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") SizeTPointer cend(); + public native @Const SizeTPointer cbegin(); + public native @Const SizeTPointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTMatchedSchemaPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTMatchedSchemaPair.java new file mode 100644 index 00000000000..5454f0648c3 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTMatchedSchemaPair.java @@ -0,0 +1,40 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::pair") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SizeTMatchedSchemaPair extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public SizeTMatchedSchemaPair(Pointer p) { super(p); } + public SizeTMatchedSchemaPair(long firstValue, MatchedSchema secondValue) { this(); put(firstValue, secondValue); } + public SizeTMatchedSchemaPair() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef SizeTMatchedSchemaPair put(@ByRef SizeTMatchedSchemaPair x); + + + @MemberGetter public native @Cast("size_t") long first(); public native SizeTMatchedSchemaPair first(long first); + @MemberGetter public native @ByRef MatchedSchema second(); public native SizeTMatchedSchemaPair second(MatchedSchema second); + + public SizeTMatchedSchemaPair put(long firstValue, MatchedSchema secondValue) { + first(firstValue); + second(secondValue); + return this; + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTOptional.java index a65304503fe..e8ab49241d5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class SizeTOptional extends Pointer { public native @Name("operator =") @ByRef SizeTOptional put(@ByRef SizeTOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @Cast("size_t") long get(); @ValueSetter public native SizeTOptional put(@Cast("size_t") long value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVector.java index ddce68fa259..c4534d0679a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,6 +34,8 @@ public class SizeTVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public long front() { return get(0); } + public long back() { return get(size() - 1); } @Index(function = "at") public native @Cast("size_t") long get(@Cast("size_t") long i); public native SizeTVector put(@Cast("size_t") long i, long value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVectorOptional.java index dc834b37a4b..63a1fd3c8bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizeTVectorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package 
org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class SizeTVectorOptional extends Pointer { public native @Name("operator =") @ByRef SizeTVectorOptional put(@ByRef SizeTVectorOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @Cast("std::vector*") @ByRef SizeTVector get(); @ValueSetter public native SizeTVectorOptional put(@Cast("std::vector*") @ByRef SizeTVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java new file mode 100644 index 00000000000..ebb8a24e0dc --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SizesAndStrides.java @@ -0,0 +1,99 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// Packed container for TensorImpl sizes and strides. +// This design improves on the previous approach of using a pair of +// c10::SmallVector by specializing for the operations we +// actually use and enforcing that the number of sizes is the same as +// the number of strides. The memory layout is as follows: +// +// 1 size_t for the size +// 5 eightbytes of inline sizes and 5 eightbytes of inline strides, OR pointer +// to out-of-line array +@Namespace("c10::impl") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SizesAndStrides extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SizesAndStrides(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public SizesAndStrides(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public SizesAndStrides position(long position) { + return (SizesAndStrides)super.position(position); + } + @Override public SizesAndStrides getPointer(long i) { + return new SizesAndStrides((Pointer)this).offsetAddress(i); + } + + // TODO: different iterator types for sizes & strides to prevent + // mixing the two accidentally. + + public SizesAndStrides() { super((Pointer)null); allocate(); } + private native void allocate(); + + public SizesAndStrides(@Const @ByRef SizesAndStrides rhs) { super((Pointer)null); allocate(rhs); } + private native void allocate(@Const @ByRef SizesAndStrides rhs); + + public native @ByRef @Name("operator =") SizesAndStrides put(@Const @ByRef SizesAndStrides rhs); + + // Move from rhs. rhs.size() == 0 afterwards. + + // Move from rhs. rhs.size() == 0 afterwards. 
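The comment above gives the rationale for c10::impl::SizesAndStrides: one size_t header plus five inline eightbytes each for sizes and strides, spilling to an out-of-line array only above rank 5, so tensors of common rank never allocate for their shape metadata. A brief usage sketch against this binding (all members used here are declared in the remainder of the class just below; that set_sizes also adjusts the rank, and that a default-constructed instance starts at rank 1, follow the C++ implementation and are assumptions not visible in this hunk):

    // Sketch: fill in the packed shape metadata for a contiguous 2x3x4 tensor.
    SizesAndStrides ss = new SizesAndStrides();
    ss.set_sizes(2, 3, 4);                        // varargs overload; rank becomes 3
    ss.set_strides(12, 4, 1);                     // stride count must equal size()
    long rank = ss.size();                        // 3
    LongArrayRef sizes = ss.sizes_arrayref();     // view over the inline storage
    LongArrayRef strides = ss.strides_arrayref(); // ditto for strides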
+ + public native @Cast("size_t") @NoException(true) long size(); + + public native @Cast("int64_t*") @NoException(true) LongPointer sizes_data(); + + public native @Cast("c10::impl::SizesAndStrides::sizes_iterator") @NoException(true) long sizes_begin(); + + public native @Cast("c10::impl::SizesAndStrides::sizes_iterator") @NoException(true) long sizes_end(); + + public native @ByVal @NoException(true) LongArrayRef sizes_arrayref(); + + public native void set_sizes(@ByVal LongArrayRef newSizes); + public native void set_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... newSizes); + + public native void set_strides(@ByVal LongArrayRef strides); + public native void set_strides(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); + + public native @Cast("int64_t*") @NoException(true) LongPointer strides_data(); + + public native @Cast("c10::impl::SizesAndStrides::strides_iterator") @NoException(true) long strides_begin(); + + public native @Cast("c10::impl::SizesAndStrides::strides_iterator") @NoException(true) long strides_end(); + + public native @ByVal @NoException(true) LongArrayRef strides_arrayref(); + + // Size accessors. + + public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer size_at(@Cast("size_t") long idx); + + public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer size_at_unchecked(@Cast("size_t") long idx); + + // Size accessors. + + public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer stride_at(@Cast("size_t") long idx); + + public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer stride_at_unchecked(@Cast("size_t") long idx); + + public native void resize(@Cast("size_t") long newSize); + + public native void resizeSlowPath(@Cast("size_t") long newSize, @Cast("size_t") long oldSize); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java index cceae76fec9..c2c4de27394 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SliceExpr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SliceExpr.java index 1d0b1ea7063..492648ef573 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SliceExpr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SliceExpr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class SliceExpr extends Expr { static { Loader.load(); } + 
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SliceExpr(Pointer p) { super(p); } - public SliceExpr(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public SliceExpr(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal ExprMaybe start(); public native @ByVal ExprMaybe end(); public native @ByVal ExprMaybe step(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SliceValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SliceValue.java index b218cdb9b0e..bc338bab1f3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SliceValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SliceValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SlotCursor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SlotCursor.java index b9e3e4408f7..b0f6a8c6abe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SlotCursor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SlotCursor.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,6 +36,6 @@ public class SlotCursor extends Pointer { return new SlotCursor((Pointer)this).offsetAddress(i); } - public native @ByRef JitModule module_(); public native SlotCursor module_(JitModule setter); - public native @Cast("int64_t") long i_(); public native SlotCursor i_(long setter); // slot offset, -1 indicates the module itself + public native @ByRef @NoOffset JitModule module_(); public native SlotCursor module_(JitModule setter); + public native @Cast("int64_t") @NoOffset long i_(); public native SlotCursor i_(long setter); // slot offset, -1 indicates the module itself } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmallNodeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmallNodeVector.java new file mode 100644 index 00000000000..53174c93531 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmallNodeVector.java @@ -0,0 +1,51 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; 
+import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::SmallVector") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SmallNodeVector extends NodeSmallVectorImpl { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SmallNodeVector(Pointer p) { super(p); } + + public SmallNodeVector() { super((Pointer)null); allocate(); } + private native void allocate(); + + public SmallNodeVector(@Cast("size_t") long Size, @ByPtrRef Node Value/*=torch::autograd::Node*()*/) { super((Pointer)null); allocate(Size, Value); } + private native void allocate(@Cast("size_t") long Size, @ByPtrRef Node Value/*=torch::autograd::Node*()*/); + public SmallNodeVector(@Cast("size_t") long Size) { super((Pointer)null); allocate(Size); } + private native void allocate(@Cast("size_t") long Size); + + // note: The enable_if restricts Container to types that have a .begin() and + // .end() that return valid input iterators. + + public SmallNodeVector(@Const @ByRef SmallNodeVector RHS) { super((Pointer)null); allocate(RHS); } + private native void allocate(@Const @ByRef SmallNodeVector RHS); + + public native @ByRef @Name("operator =") SmallNodeVector put(@Const @ByRef SmallNodeVector RHS); + + // note: The enable_if restricts Container to types that have a .begin() and + // .end() that return valid input iterators. + + + + + + // note: The enable_if restricts Container to types that have a .begin() and + // .end() that return valid input iterators. +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1Loss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1Loss.java deleted file mode 100644 index 30fc60a9612..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1Loss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code SmoothL1LossImpl}. - * See the documentation for {@code SmoothL1LossImpl} class to learn what methods it - * provides, and examples of how to use {@code SmoothL1Loss} with - * {@code torch::nn::SmoothL1LossOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SmoothL1Loss extends SmoothL1LossImplModuleHolder { - static { Loader.load(); } - - public SmoothL1Loss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public SmoothL1Loss(@SharedPtr @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public SmoothL1Loss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java index 0f8c5f65f11..06269e5e93f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -49,9 +51,9 @@ public class SmoothL1LossImpl extends SmoothL1LossImplCloneable { } public SmoothL1LossImpl(@ByVal(nullValue = "torch::nn::SmoothL1LossOptions{}") SmoothL1LossOptions options) { super((Pointer)null); allocate(options); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::SmoothL1LossOptions{}") SmoothL1LossOptions options); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::SmoothL1LossOptions{}") SmoothL1LossOptions options); public SmoothL1LossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java index a8a8f6a985c..a04402f7500 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SmoothL1LossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SmoothL1LossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SmoothL1LossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SmoothL1LossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplModuleHolder.java deleted file mode 100644 index d1ccb02ed7b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SmoothL1LossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SmoothL1LossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public SmoothL1LossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public SmoothL1LossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SmoothL1LossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") SmoothL1LossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) SmoothL1LossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native SmoothL1LossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossOptions.java index 10b3dc2aa19..7cc5f5e2e1d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -51,6 +53,6 @@ public class SmoothL1LossOptions extends Pointer { public SmoothL1LossOptions(@ByVal kSum reduction) { super((Pointer)null); allocate(reduction); } private native void allocate(@ByVal kSum reduction); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); public native @ByRef @NoException(true) DoublePointer beta(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLoss.java deleted file mode 100644 index 9e9406e5002..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code SoftMarginLossImpl}. - * See the documentation for {@code SoftMarginLossImpl} class to learn what methods - * it provides, and examples of how to use {@code SoftMarginLoss} with - * {@code torch::nn::SoftMarginLossOptions}. See the documentation for {@code ModuleHolder} - * to learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SoftMarginLoss extends SoftMarginLossImplModuleHolder { - static { Loader.load(); } - - public SoftMarginLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public SoftMarginLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SoftMarginLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java index 83b894decc1..b98d3d226f9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -48,9 +50,9 @@ public class SoftMarginLossImpl extends SoftMarginLossImplCloneable { } public SoftMarginLossImpl(@ByVal(nullValue = "torch::nn::SoftMarginLossOptions{}") SoftMarginLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::SoftMarginLossOptions{}") SoftMarginLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::SoftMarginLossOptions{}") SoftMarginLossOptions options_); public SoftMarginLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); /** Pretty prints the {@code SoftMarginLoss} module into the given {@code stream}. */ public native void pretty_print(@Cast("std::ostream*") @ByRef Pointer stream); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java index 40d58dc8ad0..4a0bfd4405b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SoftMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public SoftMarginLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftMarginLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SoftMarginLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplModuleHolder.java deleted file mode 100644 index 8f77356e2b3..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SoftMarginLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SoftMarginLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public SoftMarginLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public SoftMarginLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SoftMarginLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") SoftMarginLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) SoftMarginLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native SoftMarginLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossOptions.java index 18dd42edfae..4e49c16f320 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -51,5 +53,5 @@ public class SoftMarginLossOptions extends Pointer { public SoftMarginLossOptions(@ByVal kSum reduction) { super((Pointer)null); allocate(reduction); } private native void allocate(@ByVal kSum reduction); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax.java deleted file mode 100644 index 81bde297d83..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} 
subclass for {@code SoftmaxImpl}. - * See the documentation for {@code SoftmaxImpl} class to learn what methods it - * provides, and examples of how to use {@code Softmax} with - * {@code torch::nn::SoftmaxOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Softmax extends SoftmaxImplModuleHolder { - static { Loader.load(); } - - public Softmax(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Softmax(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftmaxImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftmaxImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Softmax(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2d.java deleted file mode 100644 index 4c598fae096..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2d.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code Softmax2dImpl}. - * See the documentation for {@code Softmax2dImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Softmax2d extends Softmax2dImplModuleHolder { - static { Loader.load(); } - - public Softmax2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Softmax2d(@SharedPtr @Cast({"", "std::shared_ptr"}) Softmax2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Softmax2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Softmax2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java index 8de0e72c702..51cfa208d0c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java index 56a184966a2..e852f490664 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class Softmax2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Softmax2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr Softmax2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(Softmax2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplModuleHolder.java deleted file mode 100644 index a7d7c0a8bcb..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Softmax2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Softmax2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public Softmax2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public Softmax2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) Softmax2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) Softmax2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") Softmax2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") Softmax2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) Softmax2dImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native Softmax2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxFuncOptions.java index f4494c39d65..4a3d4eddcd9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java index bfa34e38cfe..b246f27e0cc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class SoftmaxImpl extends SoftmaxImplCloneable { public SoftmaxImpl(Pointer p) { super(p); } public SoftmaxImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); } - @NoDeallocator private native void allocate(@Cast("int64_t") long dim); + @SharedPtr private native void allocate(@Cast("int64_t") long dim); public SoftmaxImpl(@Const @ByRef SoftmaxOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef SoftmaxOptions options_); + @SharedPtr private native void allocate(@Const @ByRef SoftmaxOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java index e4bb89636e3..aed37772063 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import 
org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SoftmaxImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftmaxImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftmaxImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SoftmaxImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplModuleHolder.java deleted file mode 100644 index c0116c89af6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SoftmaxImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SoftmaxImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public SoftmaxImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public SoftmaxImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftmaxImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftmaxImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SoftmaxImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") SoftmaxImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) SoftmaxImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native SoftmaxImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxOptions.java index abffc4d94d2..0b1cbea8e9b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmin.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmin.java deleted file mode 100644 index 133df7224d6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmin.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code SoftminImpl}. - * See the documentation for {@code SoftminImpl} class to learn what methods it - * provides, and examples of how to use {@code Softmin} with - * {@code torch::nn::SoftminOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Softmin extends SoftminImplModuleHolder { - static { Loader.load(); } - - public Softmin(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Softmin(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftminImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftminImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Softmin(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminFuncOptions.java index cd20a588fa5..6b70d06c82b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminFuncOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminFuncOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java index 5eddd1b3616..a0ac0eb6572 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class SoftminImpl extends SoftminImplCloneable { public SoftminImpl(Pointer p) { super(p); } public SoftminImpl(@Cast("int64_t") long dim) { super((Pointer)null); allocate(dim); } - @NoDeallocator private native void allocate(@Cast("int64_t") long dim); + @SharedPtr private native void allocate(@Cast("int64_t") long dim); public SoftminImpl(@Const @ByRef SoftminOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef SoftminOptions options_); + @SharedPtr private native void allocate(@Const @ByRef SoftminOptions options_); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java index 2efbdee599e..5f69d534f7f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import 
org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SoftminImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftminImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftminImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SoftminImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplModuleHolder.java deleted file mode 100644 index 18b30b525b0..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SoftminImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SoftminImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. 
*/ - /* implicit */ public SoftminImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public SoftminImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftminImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftminImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SoftminImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") SoftminImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) SoftminImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native SoftminImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. 
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminOptions.java index 6b34f52f975..afc22b00aec 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softplus.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softplus.java deleted file mode 100644 index 0d7b8a24fa1..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softplus.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code SoftplusImpl}. - * See the documentation for {@code SoftplusImpl} class to learn what methods it - * provides, and examples of how to use {@code Softplus} with - * {@code torch::nn::SoftplusOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Softplus extends SoftplusImplModuleHolder { - static { Loader.load(); } - - public Softplus(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Softplus(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftplusImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftplusImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
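
Between the deleted holder above and the @NoDeallocator → @SharedPtr change on the Impl allocators, the usage pattern shifts: the thin holder wrappers (Softmin, Softplus, ...) are removed, and the Impl classes themselves are allocated as shared_ptr. A hedged sketch of post-patch usage; `input` is a hypothetical tensor obtained elsewhere, and only the SoftminImpl members declared earlier in this diff are used:

    import org.bytedeco.pytorch.*;

    public class ImplUsageSketch {
        static Tensor apply(Tensor input) {
            // Construction now allocates via @SharedPtr, so native code that
            // takes shared ownership of the module sees the same control block.
            SoftminImpl softmin = new SoftminImpl(/* dim = */ 1);
            return softmin.forward(input); // forward() as declared above
        }
    }

The holder's operator->/operator* indirection disappears with it: callers keep the Impl reference directly, which is what the deleted ModuleHolder boilerplate was forwarding to anyway.
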
*/ - public Softplus(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java index 2c9e731ed78..fec9a6699fe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class SoftplusImpl extends SoftplusImplCloneable { } public SoftplusImpl(@Const @ByRef(nullValue = "torch::nn::SoftplusOptions{}") SoftplusOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftplusOptions{}") SoftplusOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftplusOptions{}") SoftplusOptions options_); public SoftplusImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java index f72113c27db..5d37c1744f1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SoftplusImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftplusImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftplusImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SoftplusImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplModuleHolder.java deleted file mode 100644 index ea9906e995d..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SoftplusImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SoftplusImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public SoftplusImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public SoftplusImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftplusImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftplusImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SoftplusImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") SoftplusImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) SoftplusImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native SoftplusImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusOptions.java index 0996ba19d3b..68257bf6206 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softshrink.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softshrink.java deleted file mode 100644 index 3f339b5534c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softshrink.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code SoftshrinkImpl}. - * See the documentation for {@code SoftshrinkImpl} class to learn what methods it - * provides, and examples of how to use {@code Softshrink} with - * {@code torch::nn::SoftshrinkOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Softshrink extends SoftshrinkImplModuleHolder { - static { Loader.load(); } - - public Softshrink(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Softshrink(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftshrinkImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftshrinkImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Softshrink(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java index 3cebc3eaacd..abd2d0a66b2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,9 +47,9 @@ public class SoftshrinkImpl extends SoftshrinkImplCloneable { } public SoftshrinkImpl(@Const @ByRef(nullValue = "torch::nn::SoftshrinkOptions{}") SoftshrinkOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftshrinkOptions{}") SoftshrinkOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::SoftshrinkOptions{}") SoftshrinkOptions options_); public SoftshrinkImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native @ByVal Tensor forward(@Const @ByRef Tensor input); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java index 7f43d62fdf4..7fc7bd6204d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SoftshrinkImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftshrinkImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftshrinkImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SoftshrinkImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplModuleHolder.java deleted file mode 100644 index a2952bcaf08..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SoftshrinkImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SoftshrinkImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public SoftshrinkImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public SoftshrinkImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftshrinkImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftshrinkImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SoftshrinkImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") SoftshrinkImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) SoftshrinkImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native SoftshrinkImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkOptions.java index 052a7e07a1f..1a9245c8643 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softsign.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softsign.java deleted file mode 100644 index c04cb636abb..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softsign.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code SoftsignImpl}. - * See the documentation for {@code SoftsignImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Softsign extends SoftsignImplModuleHolder { - static { Loader.load(); } - - public Softsign(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Softsign(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftsignImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftsignImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Softsign(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java index ff6c5bb4ffd..e268e01f2f9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java index 2746f50c65f..8b13f6c2501 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class SoftsignImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SoftsignImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr SoftsignImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(SoftsignImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplModuleHolder.java deleted file mode 100644 index ac862ba7086..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SoftsignImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SoftsignImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public SoftsignImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public SoftsignImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftsignImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) SoftsignImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") SoftsignImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") SoftsignImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) SoftsignImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native SoftsignImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java index c19a44ff84a..70776b2cf98 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceLocation.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceLocation.java index e7f4ec3292e..c883d4cd699 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceLocation.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceLocation.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java index 074fd6dbdd2..410544979ff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRange.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -73,7 +75,7 @@ public native void print_with_context( public native @Cast("size_t") long end(); public native @StdString BytePointer str(); - public native @ByVal StringSizeTSizeTTupleOptional file_line_col(); + public native @ByVal T_StringSizeTSizeT_TOptional file_line_col(); public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef SourceRange rhs); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeDeserializer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeDeserializer.java deleted file mode 100644 index 4e532c94a38..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeDeserializer.java +++ /dev/null @@ -1,39 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO 
NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SourceRangeDeserializer extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SourceRangeDeserializer(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SourceRangeDeserializer(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public SourceRangeDeserializer position(long position) { - return (SourceRangeDeserializer)super.position(position); - } - @Override public SourceRangeDeserializer getPointer(long i) { - return new SourceRangeDeserializer((Pointer)this).offsetAddress(i); - } - - public SourceRangeDeserializer() { super((Pointer)null); allocate(); } - private native void allocate(); - public SourceRangeDeserializer(@ByVal IValue text_table) { super((Pointer)null); allocate(text_table); } - private native void allocate(@ByVal IValue text_table); - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeHasher.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeHasher.java index cc058e4c4e1..297e6a71430 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeHasher.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeHasher.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeOptional.java index c95dee523fa..6758b6920e1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class SourceRangeOptional extends Pointer { public native @Name("operator =") @ByRef SourceRangeOptional put(@ByRef SourceRangeOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef SourceRange get(); @ValueSetter public native SourceRangeOptional put(@ByRef SourceRange value); } 
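
The SourceRangeOptional hunk above adds reset() alongside the existing accessors. A minimal sketch of the resulting lifecycle, assuming the wrapper's default constructor is generated as for the other *Optional classes (it is not shown in this hunk):

    import org.bytedeco.pytorch.*;

    public class OptionalSketch {
        static void demo(SourceRange someRange) {
            SourceRangeOptional maybe = new SourceRangeOptional(); // assumed ctor
            maybe.put(someRange);              // @ValueSetter shown above
            if (maybe.has_value()) {
                SourceRange r = maybe.get();   // maps c10::optional::value()
            }
            maybe.reset();                     // newly exposed: empties the optional
            assert !maybe.has_value();
        }
    }
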
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeSerializer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeSerializer.java deleted file mode 100644 index 94b4bf959b4..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeSerializer.java +++ /dev/null @@ -1,24 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Namespace("torch::jit") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SourceRangeSerializer extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public SourceRangeSerializer() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SourceRangeSerializer(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeUnpickler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeUnpickler.java index 16381db3df3..5cc1c3d1faa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeUnpickler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangeUnpickler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,12 +18,10 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("torch::jit") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class SourceRangeUnpickler extends Pointer { - static { Loader.load(); } + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public SourceRangeUnpickler() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public SourceRangeUnpickler(Pointer p) { super(p); } - - public native @ByVal SourceRangeOptional findSourceRangeThatGenerated( - @Const @ByRef SourceRange range); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SpecialFormValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SpecialFormValue.java index d0ef0772355..ef9b7176f10 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SpecialFormValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SpecialFormValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SplitUntil32Bit.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SplitUntil32Bit.java new file mode 100644 index 00000000000..9586aaef75d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SplitUntil32Bit.java @@ -0,0 +1,67 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** A container-like struct that acts as if it contains splits of a + * TensorIterator that can use 32-bit indexing. Taken together the splits cover + * the original TensorIterator. */ +@Namespace("at") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SplitUntil32Bit extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SplitUntil32Bit(Pointer p) { super(p); } + + public static class iterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public iterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public iterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public iterator position(long position) { + return (iterator)super.position(position); + } + @Override public iterator getPointer(long i) { + return new iterator((Pointer)this).offsetAddress(i); + } + + public iterator() { super((Pointer)null); allocate(); } + private native void allocate(); + public iterator(@Const @ByRef TensorIteratorBase iter) { super((Pointer)null); allocate(iter); } + private native void allocate(@Const @ByRef TensorIteratorBase iter); + public iterator(@ByRef(true) iterator arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@ByRef(true) iterator arg0); + + // Guaranteed to be a TensorIterator proper! 
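+ // Illustrative sketch, not generated code: combined with begin()/end() on
+ // the enclosing class, the operator bindings below support a manual loop
+ // from Java (`base` is an assumed, already-constructed TensorIteratorBase):
+ //   SplitUntil32Bit splits = new SplitUntil32Bit(base);
+ //   for (SplitUntil32Bit.iterator it = splits.begin();
+ //        it.notEquals(splits.end()); it.increment()) {
+ //     TensorIterator sub = it.multiply(); // operator*: a 32-bit-indexable split
+ //   }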
+ public native @ByRef @Name("operator *") TensorIterator multiply(); + public native @ByRef @Name("operator ++") iterator increment(); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef iterator other); + // needed for C++11 range-based for loop + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef iterator other); + + /** stack of TensorIterators to be split */ + + } + + public SplitUntil32Bit(@Const @ByRef TensorIteratorBase iter) { super((Pointer)null); allocate(iter); } + private native void allocate(@Const @ByRef TensorIteratorBase iter); + + public native @ByVal iterator begin(); + public native @ByVal iterator end(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntry.java index 0f340a64fa7..ea57f40403c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntry.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,6 +36,6 @@ public class StackEntry extends Pointer { return new StackEntry((Pointer)this).offsetAddress(i); } - public native @StdString BytePointer filename(); public native StackEntry filename(BytePointer setter); - public native @ByRef SourceRange range(); public native StackEntry range(SourceRange setter); + public native @StdString @NoOffset BytePointer filename(); public native StackEntry filename(BytePointer setter); + public native @ByRef @NoOffset SourceRange range(); public native StackEntry range(SourceRange setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector.java index adc90a42d9d..4e1bfe44c2d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class StackEntryVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public StackEntry front() { return get(0); } + public StackEntry back() { return get(size() - 1); } @Index(function = "at") public native @ByRef StackEntry get(@Cast("size_t") long i); public native StackEntryVector put(@Cast("size_t") long i, StackEntry value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector_V.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector_V.java deleted file mode 100644 index 42da9ee1308..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StackEntryVector_V.java +++ /dev/null @@ -1,26 +0,0 @@ -// Targeted by JavaCPP 
version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StackEntryVector_V extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StackEntryVector_V(Pointer p) { super(p); } - protected StackEntryVector_V() { allocate(); } - private native void allocate(); - public native @ByVal StackEntryVector call(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Starred.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Starred.java index 7795fce89df..294ccef1053 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Starred.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Starred.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Starred extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public Starred(Pointer p) { super(p); } - public Starred(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Starred(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr expr(); public static native @ByVal Starred create(@Const @ByRef SourceRange range, @Const @ByRef Expr expr); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StepLR.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StepLR.java index 0bbb096819f..537a0cef280 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StepLR.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StepLR.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Stmt.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Stmt.java index c6ad3fda816..cc4d75efe26 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Stmt.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Stmt.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,7 +25,9 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Stmt extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public Stmt(Pointer p) { super(p); } - public Stmt(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Stmt(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StmtList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StmtList.java new file mode 100644 index 00000000000..8df936e2670 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StmtList.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::List") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StmtList extends TreeView { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public StmtList(Pointer p) { super(p); } + + + public StmtList(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal @Cast("torch::jit::List::iterator*") StmtListIterator begin(); + public native @ByVal @Cast("torch::jit::List::iterator*") StmtListIterator end(); + public native @Cast("bool") boolean empty(); + public native @ByVal @Name("operator []") Stmt get(@Cast("size_t") long i); + + public static native @ByVal StmtList create(@Const @ByRef SourceRange range, @StdVector Stmt subtrees); + public static native @ByVal StmtList unsafeCreate(@Const @ByRef SourceRange range, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector subtrees); + public native @Cast("size_t") long size(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StmtListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StmtListIterator.java new file mode 100644 index 00000000000..43c94db9eb3 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StmtListIterator.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::ListIterator") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StmtListIterator extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public StmtListIterator(Pointer p) { super(p); } + + public StmtListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it) { super((Pointer)null); allocate(it); } + private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef StmtListIterator rhs); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef StmtListIterator rhs); + public native @ByVal @Name("operator *") Stmt multiply(); + public native @ByRef @Name("operator +=") StmtListIterator addPut(@Cast("std::ptrdiff_t") long n); + public native @ByRef @Name("operator ++") StmtListIterator increment(); + public native @ByRef @Name("operator --") StmtListIterator decrement(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java index 2f7d164e149..99dc4da4061 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Storage.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,7 +18,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Storage extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -40,6 +42,8 @@ public class Storage extends Pointer { public Storage() { super((Pointer)null); allocate(); } private native void allocate(); + public Storage(@ByVal StorageImplPtr ptr) { super((Pointer)null); allocate(ptr); } + private native void allocate(@ByVal StorageImplPtr ptr); // Allocates memory buffer using given allocator and creates a storage with it public Storage( @@ -122,6 +126,8 @@ private native void allocate( public native @NoException(true) StorageImpl unsafeGetStorageImpl(); + public native @ByVal WeakStorage getWeakStorageImpl(); + public native @Cast("bool") @Name("operator bool") boolean asBoolean(); public native @Cast("size_t") long use_count(); @@ -133,7 +139,7 @@ private native void allocate( public native void UniqueStorageShareExternalPointer( Pointer src, @Cast("size_t") long _capacity, - @Cast("c10::DeleterFnPtr") Deleter d/*=nullptr*/); + @Cast("c10::DeleterFnPtr") PointerConsumer d/*=nullptr*/); public native void UniqueStorageShareExternalPointer( Pointer src, @Cast("size_t") long _capacity); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java index a68973200ae..8cf0d823075 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -130,7 +132,7 @@ private native void allocate( public native void UniqueStorageShareExternalPointer( Pointer src, @Cast("size_t") long size_bytes, - @Cast("c10::DeleterFnPtr") Deleter d/*=nullptr*/); + @Cast("c10::DeleterFnPtr") PointerConsumer d/*=nullptr*/); public native void UniqueStorageShareExternalPointer( Pointer src, @Cast("size_t") long size_bytes); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImplPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImplPtr.java new file mode 100644 index 00000000000..1bae9a6e26d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageImplPtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StorageImplPtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public StorageImplPtr(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public StorageImplPtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public StorageImplPtr position(long position) { + return (StorageImplPtr)super.position(position); + } + @Override public StorageImplPtr getPointer(long i) { + return new StorageImplPtr((Pointer)this).offsetAddress(i); + } + + + public StorageImplPtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public StorageImplPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. + // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public StorageImplPtr(StorageImpl target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(StorageImpl target, @ByVal DontIncreaseRefcount arg1); + + + + public StorageImplPtr(@ByRef(true) StorageImplPtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) StorageImplPtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) StorageImplPtr put(@ByRef(true) StorageImplPtr rhs); + + public native @NoException(true) StorageImpl get(); + + public native @ByRef @Name("operator *") @NoException(true) StorageImpl multiply(); + + public native @Name("operator ->") @NoException(true) StorageImpl access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef StorageImplPtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into a intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) StorageImpl release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal StorageImplPtr reclaim(StorageImpl owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal StorageImplPtr reclaim_copy(StorageImpl owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside a intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) 
into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal StorageImplPtr unsafe_steal_from_new(StorageImpl raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal StorageImplPtr unsafe_adapt_non_heap_allocated( + StorageImpl raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal StorageImplPtr unsafe_reclaim_from_nonowning(StorageImpl raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java index 853804c8ac0..673ab545a48 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageTypePtr.java index bb90e29067f..be195885c57 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StorageTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StorageTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Stream.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Stream.java index 5c392e79b68..bb7aedc9ba3 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/Stream.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Stream.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -99,8 +101,8 @@ public enum Default { DEFAULT(0); public Stream(@Cast("c10::Stream::Default") int arg0, @ByVal Device device) { super((Pointer)null); allocate(arg0, device); } private native void allocate(@Cast("c10::Stream::Default") int arg0, @ByVal Device device); - - + public native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByRef Stream other); + public native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@Const @ByRef Stream other); public native @ByVal @NoException(true) Device device(); public native @NoException(true) DeviceType device_type(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3.java index 33afb67c1c3..1da27b35c54 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3Holder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3Holder.java deleted file mode 100644 index a43b0da3e85..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamData3Holder.java +++ /dev/null @@ -1,30 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// Similar to ComplexHolder, for StreamData3 -@Namespace("c10::ivalue") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StreamData3Holder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public StreamData3Holder(Pointer p) { super(p); } - - public StreamData3Holder(@ByVal StreamData3 d) { super((Pointer)null); allocate(d); } - private native void allocate(@ByVal StreamData3 d); - - public native @ByRef StreamData3 val(); public native StreamData3Holder val(StreamData3 setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamHash.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamHash.java deleted file mode 100644 index a42055dd911..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamHash.java +++ /dev/null @@ -1,37 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - // namespace c10 -@Name("std::hash") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StreamHash extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public StreamHash() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public StreamHash(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StreamHash(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public StreamHash position(long position) { - return (StreamHash)super.position(position); - } - @Override public StreamHash getPointer(long i) { - return new StreamHash((Pointer)this).offsetAddress(i); - } - - public native @Cast("std::size_t") @Name("operator ()") @NoException(true) long apply(@ByVal Stream s); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjType.java index 118b5dca4ec..7b9a5d1411f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjTypePtr.java index ebf6250d3fa..8ee9cc7f38e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamObjTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import 
org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamOptional.java index 6abe360898a..34b070c0a3b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class StreamOptional extends Pointer { public native @Name("operator =") @ByRef StreamOptional put(@ByRef StreamOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Stream get(); @ValueSetter public native StreamOptional put(@ByRef Stream value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java index b6450f31558..4bdfe822ba0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSet.java new file mode 100644 index 00000000000..b720402cc9f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSet.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::unordered_set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StreamSet extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public StreamSet(Pointer p) { super(p); } + public StreamSet() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef StreamSet put(@ByRef StreamSet x); + + public boolean empty() { return size() == 0; } + public native long size(); + + public Stream front() { try (Iterator it = begin()) { return it.get(); } } + public native void insert(@ByRef Stream value); + public native void erase(@ByRef Stream value); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const Stream get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Stride.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Stride.java index 5abecd9f4bf..d687b1e091d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Stride.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Stride.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -57,7 +59,7 @@ private native void allocate( @ByVal BoolOptional contiguous, @Const @ByRef SizeTOptional stride); - + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Stride b); public native @Cast("bool") boolean isComplete(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideArrayRef.java index a3665684401..3e6509f5c18 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class StrideArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public StrideArrayRef(@Const @ByRef Stride OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef Stride OneElt); + /** Construct an ArrayRef from a pointer and length. */ public StrideArrayRef(@Const Stride data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -58,6 +59,8 @@ public class StrideArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. 
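+ // Illustrative sketch, not generated code: the constructor added below
+ // wraps a Java-side StrideVector as a non-owning ArrayRef view, valid only
+ // while the vector stays alive and unresized (default vector ctor assumed):
+ //   StrideVector strides = new StrideVector();
+ //   strides.resize(2);
+ //   StrideArrayRef view = new StrideArrayRef(strides);
+ //   boolean none = view.empty();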
+ public StrideArrayRef(@ByRef StrideVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef StrideVector vec); /** Construct an ArrayRef from a std::array */ @@ -70,13 +73,13 @@ public class StrideArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Stride begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Stride end(); + public native @Const @ByPtr Stride begin(); + public native @Const @ByPtr Stride end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Stride cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Stride cend(); + public native @Const @ByPtr Stride cbegin(); + public native @Const @ByPtr Stride cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideOptional.java index f9450542791..8968e90a9f5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class StrideOptional extends Pointer { public native @Name("operator =") @ByRef StrideOptional put(@ByRef StrideOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Stride get(); @ValueSetter public native StrideOptional put(@ByRef Stride value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java index eed80ae2a4d..da3544309d6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,7 +41,7 @@ public class StrideVaryingShape extends Pointer { public StrideVaryingShape(@Cast("size_t") long size) { super((Pointer)null); allocate(size); } private native void allocate(@Cast("size_t") long size); - + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef StrideVaryingShape other); public native @Const @ByRef @Name("operator []") StrideOptional get(@Cast("size_t") long i); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVector.java index 2b7e882dc24..11c432f198a 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class StrideVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public Stride front() { return get(0); } + public Stride back() { return get(size() - 1); } @Index(function = "at") public native @ByRef Stride get(@Cast("size_t") long i); public native StrideVector put(@Cast("size_t") long i, Stride value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVectorOptional.java index c5e15bb91fc..e2fba1c7f8a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVectorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class StrideVectorOptional extends Pointer { public native @Name("operator =") @ByRef StrideVectorOptional put(@ByRef StrideVectorOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef StrideVector get(); @ValueSetter public native StrideVectorOptional put(@ByRef StrideVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDict.java index c6027ba3abf..55dd925cd7f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDict.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -47,11 +49,11 @@ public class StringAnyModuleDict extends Pointer { private native void allocate(@StdString String key_description/*="Key"*/); /** Copy constructs this {@code OrderedDict} from {@code other}. 
*/ - public StringAnyModuleDict(@Cast({"", "torch::OrderedDict&&"}) @StdMove StringAnyModuleDict other) { super((Pointer)null); allocate(other); } - private native void allocate(@Cast({"", "torch::OrderedDict&&"}) @StdMove StringAnyModuleDict other); + public StringAnyModuleDict(@Const @ByRef StringAnyModuleDict other) { super((Pointer)null); allocate(other); } + private native void allocate(@Const @ByRef StringAnyModuleDict other); /** Assigns items from {@code other} to this {@code OrderedDict}. */ - public native @ByRef @Name("operator =") StringAnyModuleDict put(@Cast({"", "torch::OrderedDict&&"}) @StdMove StringAnyModuleDict other); + public native @ByRef @Name("operator =") StringAnyModuleDict put(@Const @ByRef StringAnyModuleDict other); // NB: Move works by default, because you can move-construct vectors of const // values. I tried to make this noexcept (conditional on the move constructors @@ -116,13 +118,13 @@ public class StringAnyModuleDict extends Pointer { /** Returns an iterator to the first item in the {@code OrderedDict}. Iteration is * ordered. */ - public native @ByVal @Cast("torch::OrderedDict::Iterator*") StringAnyModuleDictItem begin(); + public native @ByVal @Cast("torch::OrderedDict::Iterator*") StringAnyModuleDictItemVector.Iterator begin(); /** Returns an iterator to the first item in the {@code OrderedDict}. Iteration is * ordered. */ /** Returns an iterator one past the last item in the {@code OrderedDict}. */ - public native @ByVal @Cast("torch::OrderedDict::Iterator*") StringAnyModuleDictItem end(); + public native @ByVal @Cast("torch::OrderedDict::Iterator*") StringAnyModuleDictItemVector.Iterator end(); /** Returns an iterator one past the last item in the {@code OrderedDict}. */ @@ -152,7 +154,7 @@ public class StringAnyModuleDict extends Pointer { /** Inserts all items from {@code other} into this {@code OrderedDict}. If any key from * {@code other} is already present in this {@code OrderedDict}, an exception is thrown. */ - public native void update(@Cast({"", "torch::OrderedDict&&"}) @StdMove StringAnyModuleDict other); + public native void update(@ByRef(true) StringAnyModuleDict other); /** Inserts all items from {@code other} into this {@code OrderedDict}. If any key from * {@code other} is already present in this {@code OrderedDict}, an exception is thrown. */ @@ -168,7 +170,7 @@ public class StringAnyModuleDict extends Pointer { // Observers /** Returns the items stored in the {@code OrderedDict}. */ - public native @StdVector @NoException(true) StringAnyModuleDictItem items(); + public native @Const @ByRef @NoException(true) StringAnyModuleDictItemVector items(); /** Returns a newly allocated vector and copies all keys from this * {@code OrderedDict} into the vector. */ @@ -180,7 +182,7 @@ public class StringAnyModuleDict extends Pointer { /** Returns a newly allocated vector and copies all keys and values from this * {@code OrderedDict} into a vector of {@code std::pair}. */ - public native @ByVal StringAnyModulePairVector pairs(); + public native @ByVal StringAnyModuleVector pairs(); /** Returns true if both dicts contain the same keys and values, in the same * order. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItem.java index 737ca5e09dc..b6bb400d285 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItem.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItem.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItemVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItemVector.java new file mode 100644 index 00000000000..a87b0cc96b8 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleDictItemVector.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::vector::Item>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringAnyModuleDictItemVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public StringAnyModuleDictItemVector(Pointer p) { super(p); } + public StringAnyModuleDictItemVector() { allocate(); } + private native void allocate(); + + + public boolean empty() { return size() == 0; } + public native long size(); + + public StringAnyModuleDictItem front() { return get(0); } + public StringAnyModuleDictItem back() { return get(size() - 1); } + @Index(function = "at") public native @ByRef StringAnyModuleDictItem get(@Cast("size_t") long i); + + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const StringAnyModuleDictItem get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePair.java index 04ec77b5c17..ea54c1f0566 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePair.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePairVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleVector.java similarity index 54% rename from pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePairVector.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleVector.java index 2ef477fd95d..eeade3bb615 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModulePairVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringAnyModuleVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,28 +18,28 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringAnyModulePairVector extends Pointer { +public class StringAnyModuleVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public StringAnyModulePairVector(Pointer p) { super(p); } - public StringAnyModulePairVector(BytePointer[] firstValue, AnyModule[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } - public StringAnyModulePairVector(String[] firstValue, AnyModule[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } - public StringAnyModulePairVector() { allocate(); } - public StringAnyModulePairVector(long n) { allocate(n); } + public StringAnyModuleVector(Pointer p) { super(p); } + public StringAnyModuleVector(BytePointer[] firstValue, AnyModule[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } + public StringAnyModuleVector(String[] firstValue, AnyModule[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } + public StringAnyModuleVector() { allocate(); } + public StringAnyModuleVector(long n) { allocate(n); } private native void allocate(); private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef StringAnyModulePairVector put(@ByRef StringAnyModulePairVector x); + public native @Name("operator =") @ByRef StringAnyModuleVector put(@ByRef StringAnyModuleVector x); public boolean empty() { return size() == 0; } public native long size(); public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringAnyModulePairVector first(@Cast("size_t") long i, BytePointer first); - @Index(function = "at") public native @ByRef AnyModule second(@Cast("size_t") long i); public native StringAnyModulePairVector second(@Cast("size_t") long i, AnyModule second); - @MemberSetter @Index(function = "at") public native StringAnyModulePairVector first(@Cast("size_t") long i, @StdString String first); + @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringAnyModuleVector first(@Cast("size_t") long i, BytePointer first); + @Index(function = "at") public native @ByRef AnyModule second(@Cast("size_t") long i); public native StringAnyModuleVector second(@Cast("size_t") long i, AnyModule second); + @MemberSetter @Index(function = "at") public native StringAnyModuleVector first(@Cast("size_t") long i, @StdString String first); - public StringAnyModulePairVector put(BytePointer[] firstValue, AnyModule[] secondValue) { + public StringAnyModuleVector put(BytePointer[] firstValue, AnyModule[] secondValue) { for (int i = 0; i < firstValue.length && i < secondValue.length; i++) { first(i, firstValue[i]); second(i, secondValue[i]); @@ -45,7 +47,7 @@ public StringAnyModulePairVector put(BytePointer[] firstValue, AnyModule[] secon return this; } - public StringAnyModulePairVector put(String[] firstValue, AnyModule[] secondValue) { + public StringAnyModuleVector put(String[] firstValue, AnyModule[] secondValue) { for (int i = 0; i < firstValue.length && i < secondValue.length; i++) { first(i, firstValue[i]); second(i, secondValue[i]); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringArrayRef.java index 804f16fe22b..d5ef995e3a9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT 
EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,19 +22,34 @@ public class StringArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public StringArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public StringArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public StringArrayRef position(long position) { + return (StringArrayRef)super.position(position); + } + @Override public StringArrayRef getPointer(long i) { + return new StringArrayRef((Pointer)this).offsetAddress(i); + } /** \name Constructors * \{

* Construct an empty ArrayRef. */ - /* implicit */ + /* implicit */ public StringArrayRef() { super((Pointer)null); allocate(); } +private native void allocate(); /** Construct an ArrayRef from a single element. */ // TODO Make this explicit + /** Construct an ArrayRef from a pointer and length. */ + public StringArrayRef(PointerPointer data, long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Cast("const std::string*") PointerPointer data, @Cast("size_t") long length); /** Construct an ArrayRef from a range. */ + public StringArrayRef(PointerPointer begin, PointerPointer end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Cast("const std::string*") PointerPointer begin, @Cast("const std::string*") PointerPointer end); /** Construct an ArrayRef from a SmallVector. This is templated in order to * avoid instantiating SmallVectorTemplateCommon<T> whenever we @@ -42,6 +59,8 @@ public class StringArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector<bool>, because ArrayRef can't work on a std::vector<bool> // bitfield. + public StringArrayRef(@ByRef StringVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef StringVector vec); /** Construct an ArrayRef from a std::array */ @@ -54,18 +73,18 @@ public class StringArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast({"", "std::string*"}) @StdString BytePointer begin(); - public native @ByVal @Cast({"", "std::string*"}) @StdString BytePointer end(); + public native @Const PointerPointer begin(); + public native @Const PointerPointer end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast({"", "std::string*"}) @StdString BytePointer cbegin(); - public native @ByVal @Cast({"", "std::string*"}) @StdString BytePointer cend(); + public native @Const PointerPointer cbegin(); + public native @Const PointerPointer cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); - public native @Const @StdString BytePointer data(); + public native @Const PointerPointer data(); /** size - Get the array size.
*/ public native @Cast("const size_t") long size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringBoolMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringBoolMap.java index 584b2c150ad..9ebf0766c60 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringBoolMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringBoolMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java index ef18b3943a3..1f16079ecbf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringCordView.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringFunctionMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringFunctionMap.java index b87e06e4bdb..29ea8a7d9a8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringFunctionMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringFunctionMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java index 9cc0c755d0f..1a8efa58d52 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,39 +17,37 @@ import static org.bytedeco.pytorch.global.torch.*; - -/** - * An object of this class stores a map from Key to Value. - * - * This is a pointer type. 
After a copy, both Dicts - * will share the same storage: - * - * > Dict<int, string> a; - * > Dict<int, string> b = a; - * > b.insert(3, "three"); - * > ASSERT("three" == a.at(3)); - * - * We use this class in the PyTorch kernel API because that - * allows us to do optimizations and switch out the underlying - * map implementation without breaking backwards compatibility - * for the kernel API. - */ -@Name("c10::Dict<std::string,c10::impl::GenericList>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("c10::Dict<std::string,c10::impl::GenericList>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class StringGenericListDict extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public StringGenericListDict(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public StringGenericListDict(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public StringGenericListDict position(long position) { + return (StringGenericListDict)super.position(position); + } + @Override public StringGenericListDict getPointer(long i) { + return new StringGenericListDict((Pointer)this).offsetAddress(i); + } /** * Creates an empty dict. */ + public StringGenericListDict() { super((Pointer)null); allocate(); } + private native void allocate(); /** * Create a generic dict with runtime type information. * This only works for c10::impl::GenericDict and is not part of the public API * but only supposed to be used internally by PyTorch. */ + + + public StringGenericListDict(@Const @ByRef StringGenericListDict arg0) { super((Pointer)null); allocate(arg0); } + private native void allocate(@Const @ByRef StringGenericListDict arg0); public native @ByRef @Name("operator =") StringGenericListDict put(@Const @ByRef StringGenericListDict arg0); /** @@ -120,8 +120,8 @@ public class StringGenericListDict extends Pointer { * Returns the mapped value of the element with key equivalent to key. * If no such element exists, an exception of type std::out_of_range is thrown. */ - public native @ByVal @Cast("c10::impl::GenericList*") Pointer at(@StdString BytePointer key); - public native @ByVal @Cast("c10::impl::GenericList*") Pointer at(@StdString String key); + public native @ByVal GenericList at(@StdString BytePointer key); + public native @ByVal GenericList at(@StdString String key); /** * Finds an element with key equivalent to key.
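The pointer semantics described in the removed comment above still apply at the Java level: copy-constructing a StringGenericListDict (the c10::Dict<std::string,c10::impl::GenericList> instantiation) duplicates the handle, not the entries, so both wrappers observe the same underlying map. A minimal sketch of that behavior, using only the members visible in this hunk; the dict `populated` and the key "grads" are hypothetical, since the insertion members are outside this hunk:

    // Sketch: `populated` is a StringGenericListDict filled in elsewhere (e.g. by native code).
    StringGenericListDict alias = new StringGenericListDict(populated); // copies the handle; storage is shared
    // A lookup through the alias resolves against the same map the original sees;
    // at() surfaces the native std::out_of_range as a Java RuntimeException when the key is absent.
    GenericList grads = alias.at("grads");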
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringIValueMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringIValueMap.java index 4ebf4d5d9d6..eba90543da4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringIValueMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringIValueMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringIntMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringIntMap.java index 599c157d52f..a5fa205b60b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringIntMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringIntMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLiteral.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLiteral.java index 69c3991b2ac..0ba89fa6404 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLiteral.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLiteral.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class StringLiteral extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public StringLiteral(Pointer p) { super(p); } - public StringLiteral(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public StringLiteral(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @StdString BytePointer text(); public static native @ByVal StringLiteral create( @Const @ByRef SourceRange range, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongMap.java index 56911e06a1c..2b398a2b116 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongStringMapMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongStringMapMap.java index 2992f1cd636..1e4416435ad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongStringMapMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongStringMapMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongVector.java index 89899adbc31..2acb2223bae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringLongVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDict.java index 3c5fc3b321c..ce135bb7599 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDict.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -116,13 +118,13 @@ public class StringModuleDict extends Pointer { /** Returns an iterator to the first item in the {@code OrderedDict}. Iteration is * ordered. */ - public native @ByVal @Cast("torch::OrderedDict<std::string,torch::nn::Module>::Iterator*") StringModuleDictItem begin(); + public native @ByVal @Cast("torch::OrderedDict<std::string,torch::nn::Module>::Iterator*") StringModuleDictItemVector.Iterator begin(); /** Returns an iterator to the first item in the {@code OrderedDict}. Iteration is * ordered. */ /** Returns an iterator one past the last item in the {@code OrderedDict}. */ - public native @ByVal @Cast("torch::OrderedDict<std::string,torch::nn::Module>::Iterator*") StringModuleDictItem end(); + public native @ByVal @Cast("torch::OrderedDict<std::string,torch::nn::Module>::Iterator*") StringModuleDictItemVector.Iterator end(); /** Returns an iterator one past the last item in the {@code OrderedDict}. */ @@ -147,8 +149,10 @@ public class StringModuleDict extends Pointer { /** Inserts a new {@code (key, value)} pair into the {@code OrderedDict}. Throws an * exception if the key is already present. If insertion is successful, * immediately returns a reference to the inserted value. */ - public native @ByRef Module insert(@StdString BytePointer key, @ByRef(true) Module value); - public native @ByRef Module insert(@StdString String key, @ByRef(true) Module value); + public Module insert(BytePointer key, Module value) { return _insert(key, value.asModule()); } + private native @ByRef @Name("insert") Module _insert(@StdString BytePointer key, @ByRef(true) Module value); + public Module insert(String key, Module value) { return _insert(key, value.asModule()); } + private native @ByRef @Name("insert") Module _insert(@StdString String key, @ByRef(true) Module value); /** Inserts all items from {@code other} into this {@code OrderedDict}. If any key from * {@code other} is already present in this {@code OrderedDict}, an exception is thrown. */ @@ -168,7 +172,7 @@ public class StringModuleDict extends Pointer { // Observers /** Returns the items stored in the {@code OrderedDict}. */ - public native @StdVector @NoException(true) StringModuleDictItem items(); + public native @Const @ByRef @NoException(true) StringModuleDictItemVector items(); /** Returns a newly allocated vector and copies all keys from this * {@code OrderedDict} into the vector. */ @@ -180,7 +184,7 @@ /** Returns a newly allocated vector and copies all keys and values from this * {@code OrderedDict} into a vector of {@code std::pair<Key, Value>}. */ - public native @ByVal StringModulePairVector pairs(); + public native @ByVal StringModuleVector pairs(); /** Returns true if both dicts contain the same keys and values, in the same * order.
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItem.java index 9222f5be5d6..825ef8f98b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItem.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItem.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,9 +25,9 @@ public class StringModuleDictItem extends Pointer { public StringModuleDictItem(Pointer p) { super(p); } /** Constructs a new item. */ - public StringModuleDictItem(@StdString BytePointer key, @ByVal Module value) { super((Pointer)null); allocate(key, value); } + public StringModuleDictItem(@StdString BytePointer key, @ByVal Module value) { super((Pointer)null); allocate(key, value.asModule()); } private native void allocate(@StdString BytePointer key, @ByVal Module value); - public StringModuleDictItem(@StdString String key, @ByVal Module value) { super((Pointer)null); allocate(key, value); } + public StringModuleDictItem(@StdString String key, @ByVal Module value) { super((Pointer)null); allocate(key, value.asModule()); } private native void allocate(@StdString String key, @ByVal Module value); /** Returns a reference to the value. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItemVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItemVector.java new file mode 100644 index 00000000000..357e89a4d85 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleDictItemVector.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::vector<torch::OrderedDict<std::string,torch::nn::Module>::Item>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringModuleDictItemVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ + public StringModuleDictItemVector(Pointer p) { super(p); } + public StringModuleDictItemVector() { allocate(); } + private native void allocate(); + + + public boolean empty() { return size() == 0; } + public native long size(); + + public StringModuleDictItem front() { return get(0); } + public StringModuleDictItem back() { return get(size() - 1); } + @Index(function = "at") public native @ByRef StringModuleDictItem get(@Cast("size_t") long i); + + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const StringModuleDictItem get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePair.java index 6919f7a9edf..f027d5deb09 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePair.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePairVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleVector.java similarity index 55% rename from pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePairVector.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleVector.java index 000142443be..1d22d80c23b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringModulePairVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringModuleVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,28 +18,28 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("std::vector<std::pair<std::string,torch::nn::Module> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringModulePairVector extends Pointer { +public class StringModuleVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ - public StringModulePairVector(Pointer p) { super(p); } - public StringModulePairVector(BytePointer[] firstValue, Module[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } - public StringModulePairVector(String[] firstValue, Module[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } - public StringModulePairVector() { allocate(); } - public StringModulePairVector(long n) { allocate(n); } + public StringModuleVector(Pointer p) { super(p); } + public StringModuleVector(BytePointer[] firstValue, Module[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } + public StringModuleVector(String[] firstValue, Module[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } + public StringModuleVector() { allocate(); } + public StringModuleVector(long n) { allocate(n); } private native void allocate(); private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef StringModulePairVector put(@ByRef StringModulePairVector x); + public native @Name("operator =") @ByRef StringModuleVector put(@ByRef StringModuleVector x); public boolean empty() { return size() == 0; } public native long size(); public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringModulePairVector first(@Cast("size_t") long i, BytePointer first); - @Index(function = "at") public native @ByRef Module second(@Cast("size_t") long i); public native StringModulePairVector second(@Cast("size_t") long i, Module second); - @MemberSetter @Index(function = "at") public native StringModulePairVector first(@Cast("size_t") long i, @StdString String first); + @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringModuleVector first(@Cast("size_t") long i, BytePointer first); + @Index(function = "at") public native @ByRef Module second(@Cast("size_t") long i); public native StringModuleVector second(@Cast("size_t") long i, Module second); + @MemberSetter @Index(function = "at") public native StringModuleVector first(@Cast("size_t") long i, @StdString String first); - public StringModulePairVector put(BytePointer[] firstValue, Module[] secondValue) { + public StringModuleVector put(BytePointer[] firstValue, Module[] secondValue) { for (int i = 0; i < firstValue.length && i < secondValue.length; i++) { first(i, firstValue[i]); second(i, secondValue[i]); @@ -45,7 +47,7 @@ public StringModulePairVector put(BytePointer[] firstValue, Module[] secondValue return this; } - public StringModulePairVector put(String[] firstValue, Module[] secondValue) { + public StringModuleVector put(String[] firstValue, Module[] secondValue) { for (int i = 0; i < firstValue.length && i < secondValue.length; i++) { first(i, firstValue[i]); second(i, secondValue[i]); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringOptional.java index 9cc92adccc7..ad303df3dca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package 
org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,7 @@ public class StringOptional extends Pointer { public native @Name("operator =") @ByRef StringOptional put(@ByRef StringOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @StdString BytePointer get(); @ValueSetter public native StringOptional put(@StdString BytePointer value); @ValueSetter public native StringOptional put(@StdString String value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSet.java index a0ac74e52f3..3bbe8a4c8de 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSet.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,7 @@ public class StringSet extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public BytePointer front() { try (Iterator it = begin()) { return it.get(); } } public native void insert(@StdString BytePointer value); public native void erase(@StdString BytePointer value); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java index 4a27b80c68f..622fc6403da 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDict.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -91,8 +93,8 @@ public class StringSharedModuleDict extends Pointer { /** Returns the value associated with the given {@code key}. Throws an exception if * no such key is stored in the {@code OrderedDict}. Use {@code find()} for a * non-throwing way of accessing a value if it is present. */ - public native @SharedPtr @ByRef @Name("operator []") Module get(@StdString BytePointer key); - public native @SharedPtr @ByRef @Name("operator []") Module get(@StdString String key); + public native @SharedPtr("torch::nn::Module") @ByRef @Name("operator []") Module get(@StdString BytePointer key); + public native @SharedPtr("torch::nn::Module") @ByRef @Name("operator []") Module get(@StdString String key); /** Returns the value associated with the given {@code key}. Throws an exception if * no such key is stored in the {@code OrderedDict}. 
Use {@code find()} for a @@ -102,8 +104,8 @@ public class StringSharedModuleDict extends Pointer { /** Returns a pointer to the value associated with the given key, or a * {@code nullptr} if no such key is stored in the {@code OrderedDict}. */ - public native @SharedPtr @NoException(true) Module find(@StdString BytePointer key); - public native @SharedPtr @NoException(true) Module find(@StdString String key); + public native @SharedPtr("torch::nn::Module") @NoException(true) Module find(@StdString BytePointer key); + public native @SharedPtr("torch::nn::Module") @NoException(true) Module find(@StdString String key); /** Returns a pointer to the value associated with the given key, or a * {@code nullptr} if no such key is stored in the {@code OrderedDict}. */ @@ -116,13 +118,13 @@ public class StringSharedModuleDict extends Pointer { /** Returns an iterator to the first item in the {@code OrderedDict}. Iteration is * ordered. */ - public native @ByVal @Cast("torch::OrderedDict<std::string,std::shared_ptr<torch::nn::Module> >::Iterator*") StringSharedModuleDictItem begin(); + public native @ByVal @Cast("torch::OrderedDict<std::string,std::shared_ptr<torch::nn::Module> >::Iterator*") StringSharedModuleDictItemVector.Iterator begin(); /** Returns an iterator to the first item in the {@code OrderedDict}. Iteration is * ordered. */ /** Returns an iterator one past the last item in the {@code OrderedDict}. */ - public native @ByVal @Cast("torch::OrderedDict<std::string,std::shared_ptr<torch::nn::Module> >::Iterator*") StringSharedModuleDictItem end(); + public native @ByVal @Cast("torch::OrderedDict<std::string,std::shared_ptr<torch::nn::Module> >::Iterator*") StringSharedModuleDictItemVector.Iterator end(); /** Returns an iterator one past the last item in the {@code OrderedDict}. */ @@ -147,8 +149,10 @@ public class StringSharedModuleDict extends Pointer { /** Inserts a new {@code (key, value)} pair into the {@code OrderedDict}. Throws an * exception if the key is already present. If insertion is successful, * immediately returns a reference to the inserted value. */ - public native @SharedPtr @ByRef Module insert(@StdString BytePointer key, @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module value); - public native @SharedPtr @ByRef Module insert(@StdString String key, @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module value); + public Module insert(BytePointer key, Module value) { return _insert(key, value.asModule()); } + private native @SharedPtr("torch::nn::Module") @ByRef @Name("insert") Module _insert(@StdString BytePointer key, @SharedPtr("torch::nn::Module") @ByRef(true) Module value); + public Module insert(String key, Module value) { return _insert(key, value.asModule()); } + private native @SharedPtr("torch::nn::Module") @ByRef @Name("insert") Module _insert(@StdString String key, @SharedPtr("torch::nn::Module") @ByRef(true) Module value); /** Inserts all items from {@code other} into this {@code OrderedDict}. If any key from * {@code other} is already present in this {@code OrderedDict}, an exception is thrown. */ @@ -168,7 +172,7 @@ public class StringSharedModuleDict extends Pointer { // Observers /** Returns the items stored in the {@code OrderedDict}. */ - public native @StdVector @NoException(true) StringSharedModuleDictItem items(); + public native @Const @ByRef @NoException(true) StringSharedModuleDictItemVector items(); /** Returns a newly allocated vector and copies all keys from this * {@code OrderedDict} into the vector. */ @@ -180,7 +184,7 @@ public class StringSharedModuleDict extends Pointer { /** Returns a newly allocated vector and copies all keys and values from this * {@code OrderedDict} into a vector of {@code std::pair<Key, Value>}.
*/ - public native @ByVal StringSharedModulePairVector pairs(); + public native @ByVal StringSharedModuleVector pairs(); /** Returns true if both dicts contain the same keys and values, in the same * order. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java index c23ecc7948d..834d8c161bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItem.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,18 +25,18 @@ public class StringSharedModuleDictItem extends Pointer { public StringSharedModuleDictItem(Pointer p) { super(p); } /** Constructs a new item. */ - public StringSharedModuleDictItem(@StdString BytePointer key, @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module value) { super((Pointer)null); allocate(key, value); } - private native void allocate(@StdString BytePointer key, @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module value); - public StringSharedModuleDictItem(@StdString String key, @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module value) { super((Pointer)null); allocate(key, value); } - private native void allocate(@StdString String key, @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module value); + public StringSharedModuleDictItem(@StdString BytePointer key, @SharedPtr("torch::nn::Module") @ByVal Module value) { super((Pointer)null); allocate(key, value.asModule()); } + private native void allocate(@StdString BytePointer key, @SharedPtr("torch::nn::Module") @ByVal Module value); + public StringSharedModuleDictItem(@StdString String key, @SharedPtr("torch::nn::Module") @ByVal Module value) { super((Pointer)null); allocate(key, value.asModule()); } + private native void allocate(@StdString String key, @SharedPtr("torch::nn::Module") @ByVal Module value); /** Returns a reference to the value. */ - public native @SharedPtr @ByRef @Name("operator *") Module multiply(); + public native @SharedPtr("torch::nn::Module") @ByRef @Name("operator *") Module multiply(); /** Returns a reference to the value. */ /** Allows access to the value using the arrow operator. */ - public native @SharedPtr @Name("operator ->") Module access(); + public native @SharedPtr("torch::nn::Module") @Name("operator ->") Module access(); /** Allows access to the value using the arrow operator. */ @@ -42,7 +44,7 @@ public class StringSharedModuleDictItem extends Pointer { public native @StdString @NoException(true) BytePointer key(); /** Returns a reference to the value. */ - public native @SharedPtr @ByRef @NoException(true) Module value(); + public native @SharedPtr("torch::nn::Module") @ByRef @NoException(true) Module value(); /** Returns a reference to the value.
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItemVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItemVector.java new file mode 100644 index 00000000000..e43b4a4a24a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleDictItemVector.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::vector<torch::OrderedDict<std::string,std::shared_ptr<torch::nn::Module> >::Item>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringSharedModuleDictItemVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public StringSharedModuleDictItemVector(Pointer p) { super(p); } + public StringSharedModuleDictItemVector() { allocate(); } + private native void allocate(); + + + public boolean empty() { return size() == 0; } + public native long size(); + + public StringSharedModuleDictItem front() { return get(0); } + public StringSharedModuleDictItem back() { return get(size() - 1); } + @Index(function = "at") public native @ByRef StringSharedModuleDictItem get(@Cast("size_t") long i); + + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const StringSharedModuleDictItem get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePair.java index e605a5b2897..21ab2ddb001 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePair.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,15 +22,15 @@ public class StringSharedModulePair extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ public StringSharedModulePair(Pointer p) { super(p); } - public StringSharedModulePair(BytePointer firstValue, @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module secondValue) { this(); put(firstValue, secondValue); } - public StringSharedModulePair(String firstValue, @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module secondValue) { this(); put(firstValue, secondValue); } + public StringSharedModulePair(BytePointer firstValue, Module secondValue) { this(); put(firstValue, secondValue); } + public StringSharedModulePair(String firstValue, Module secondValue) { this(); put(firstValue, secondValue); } public StringSharedModulePair() { allocate(); } private native void allocate(); public native @Name("operator =") @ByRef StringSharedModulePair put(@ByRef StringSharedModulePair x); @MemberGetter public native @StdString BytePointer first(); public native StringSharedModulePair first(BytePointer first); - @MemberGetter public native @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module second(); public native StringSharedModulePair second(Module second); + @MemberGetter public native @SharedPtr("torch::nn::Module") Module second(); public native StringSharedModulePair second(Module second); @MemberSetter @Index public native StringSharedModulePair first(@StdString String first); public StringSharedModulePair put(BytePointer firstValue, Module secondValue) { diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePairVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleVector.java similarity index 52% rename from pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePairVector.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleVector.java index d8aa26de49f..0c4247902a7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModulePairVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSharedModuleVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,28 +18,28 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("std::vector<std::pair<std::string,std::shared_ptr<torch::nn::Module> > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringSharedModulePairVector extends Pointer { +public class StringSharedModuleVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ - public StringSharedModulePairVector(Pointer p) { super(p); } - public StringSharedModulePairVector(BytePointer[] firstValue, @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } - public StringSharedModulePairVector(String[] firstValue, @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } - public StringSharedModulePairVector() { allocate(); } - public StringSharedModulePairVector(long n) { allocate(n); } + public StringSharedModuleVector(Pointer p) { super(p); } + public StringSharedModuleVector(BytePointer[] firstValue, Module[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } + public StringSharedModuleVector(String[] firstValue, Module[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } + public StringSharedModuleVector() { allocate(); } + public StringSharedModuleVector(long n) { allocate(n); } private native void allocate(); private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef StringSharedModulePairVector put(@ByRef StringSharedModulePairVector x); + public native @Name("operator =") @ByRef StringSharedModuleVector put(@ByRef StringSharedModuleVector x); public boolean empty() { return size() == 0; } public native long size(); public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringSharedModulePairVector first(@Cast("size_t") long i, BytePointer first); - @Index(function = "at") public native @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module second(@Cast("size_t") long i); public native StringSharedModulePairVector second(@Cast("size_t") long i, Module second); - @MemberSetter @Index(function = "at") public native StringSharedModulePairVector first(@Cast("size_t") long i, @StdString String first); + @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringSharedModuleVector first(@Cast("size_t") long i, BytePointer first); + @Index(function = "at") public native @SharedPtr("torch::nn::Module") Module second(@Cast("size_t") long i); public native StringSharedModuleVector second(@Cast("size_t") long i, Module second); + @MemberSetter @Index(function = "at") public native StringSharedModuleVector first(@Cast("size_t") long i, @StdString String first); - public StringSharedModulePairVector put(BytePointer[] firstValue, Module[] secondValue) { + public StringSharedModuleVector put(BytePointer[] firstValue, Module[] secondValue) { for (int i = 0; i < firstValue.length && i < secondValue.length; i++) { first(i, firstValue[i]); second(i, secondValue[i]); @@ -45,7 +47,7 @@ public StringSharedModulePairVector put(BytePointer[] firstValue, Module[] secon return this; } - public StringSharedModulePairVector put(String[] firstValue, Module[] secondValue) { + public StringSharedModuleVector put(String[] firstValue, Module[] secondValue) { for (int i = 0; i < firstValue.length && i < secondValue.length; i++) { first(i, firstValue[i]); second(i, secondValue[i]); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTMap.java index ad078fdb281..cb048858087 100644 ---
a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringStringMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringStringMap.java index fca6cbceece..51313355a07 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringStringMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringStringMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java index 9c008479aae..acdf68cbccf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDict.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,7 +18,7 @@ import static org.bytedeco.pytorch.global.torch.*; /** An ordered dictionary implementation, akin to Python's {@code OrderedDict}. */ -@Name("torch::OrderedDict") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::OrderedDict") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class StringTensorDict extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -117,13 +119,13 @@ public class StringTensorDict extends Pointer { /** Returns an iterator to the first item in the {@code OrderedDict}. Iteration is * ordered. */ - public native @ByVal @Cast("torch::OrderedDict<std::string,torch::Tensor>::Iterator*") StringTensorDictItem begin(); + public native @ByVal @Cast("torch::OrderedDict<std::string,torch::Tensor>::Iterator*") StringTensorDictItemVector.Iterator begin(); /** Returns an iterator to the first item in the {@code OrderedDict}. Iteration is * ordered. */ /** Returns an iterator one past the last item in the {@code OrderedDict}. */ - public native @ByVal @Cast("torch::OrderedDict<std::string,torch::Tensor>::Iterator*") StringTensorDictItem end(); + public native @ByVal @Cast("torch::OrderedDict<std::string,torch::Tensor>::Iterator*") StringTensorDictItemVector.Iterator end(); /** Returns an iterator one past the last item in the {@code OrderedDict}.
*/ @@ -169,7 +171,7 @@ public class StringTensorDict extends Pointer { // Observers /** Returns the items stored in the {@code OrderedDict}. */ - public native @StdVector @NoException(true) StringTensorDictItem items(); + public native @Const @ByRef @NoException(true) StringTensorDictItemVector items(); /** Returns a newly allocated vector and copies all keys from this * {@code OrderedDict} into the vector. */ @@ -177,11 +179,11 @@ public class StringTensorDict extends Pointer { /** Returns a newly allocated vector and copies all values from this * {@code OrderedDict} into the vector. */ - public native @Cast({"", "std::vector"}) @StdMove TensorVector values(); + public native @Cast({"", "std::vector"}) @StdMove TensorVector values(); /** Returns a newly allocated vector and copies all keys and values from this * {@code OrderedDict} into a vector of {@code std::pair<Key, Value>}. */ - public native @ByVal StringTensorPairVector pairs(); + public native @ByVal StringTensorVector pairs(); /** Returns true if both dicts contain the same keys and values, in the same * order. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItem.java index 72872cad3ed..ffbf07a6a3f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItem.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItem.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -18,7 +20,7 @@ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict<Key, Value>::Item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -@Name("torch::OrderedDict::Item") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("torch::OrderedDict::Item") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class StringTensorDictItem extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -49,5 +51,5 @@ public class StringTensorDictItem extends Pointer { /** Returns a reference to the value. */ /** Returns a {@code (key, value)} pair.
*/ - public native @Cast("const std::pair*") @ByRef @NoException(true) StringTensorPair pair(); + public native @Cast("const std::pair*") @ByRef @NoException(true) StringTensorPair pair(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItemVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItemVector.java new file mode 100644 index 00000000000..43a5cf9f739 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorDictItemVector.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::vector::Item>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringTensorDictItemVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public StringTensorDictItemVector(Pointer p) { super(p); } + public StringTensorDictItemVector() { allocate(); } + private native void allocate(); + + + public boolean empty() { return size() == 0; } + public native long size(); + + public StringTensorDictItem front() { return get(0); } + public StringTensorDictItem back() { return get(size() - 1); } + @Index(function = "at") public native @ByRef StringTensorDictItem get(@Cast("size_t") long i); + + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const StringTensorDictItem get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorMap.java index d35f38b14fa..5b7ea2e071c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("std::map") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("std::map") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class StringTensorMap extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPair.java index 66248708ef6..d17a03f792d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPair.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPair.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPairVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorVector.java similarity index 51% rename from pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPairVector.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorVector.java index 3c1dfcba884..93b1c647793 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorPairVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTensorVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,29 +17,29 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("std::vector<std::pair<std::string,torch::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringTensorPairVector extends Pointer { +@Name("std::vector<std::pair<std::string,torch::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringTensorVector extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
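 * <p>Illustrative use of the renamed paired-vector API (editor's sketch;
 * t0 and t1 stand for tensors created elsewhere):
 * <pre>{@code
 * StringTensorVector pairs = new StringTensorVector(
 *         new String[] {"weight", "bias"}, new Tensor[] {t0, t1});
 * String key = pairs.first(0).getString();
 * Tensor value = pairs.second(0);
 * }</pre>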
*/ - public StringTensorPairVector(Pointer p) { super(p); } - public StringTensorPairVector(BytePointer[] firstValue, Tensor[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } - public StringTensorPairVector(String[] firstValue, Tensor[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } - public StringTensorPairVector() { allocate(); } - public StringTensorPairVector(long n) { allocate(n); } + public StringTensorVector(Pointer p) { super(p); } + public StringTensorVector(BytePointer[] firstValue, Tensor[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } + public StringTensorVector(String[] firstValue, Tensor[] secondValue) { this(Math.min(firstValue.length, secondValue.length)); put(firstValue, secondValue); } + public StringTensorVector() { allocate(); } + public StringTensorVector(long n) { allocate(n); } private native void allocate(); private native void allocate(@Cast("size_t") long n); - public native @Name("operator =") @ByRef StringTensorPairVector put(@ByRef StringTensorPairVector x); + public native @Name("operator =") @ByRef StringTensorVector put(@ByRef StringTensorVector x); public boolean empty() { return size() == 0; } public native long size(); public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); - @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringTensorPairVector first(@Cast("size_t") long i, BytePointer first); - @Index(function = "at") public native @ByRef Tensor second(@Cast("size_t") long i); public native StringTensorPairVector second(@Cast("size_t") long i, Tensor second); - @MemberSetter @Index(function = "at") public native StringTensorPairVector first(@Cast("size_t") long i, @StdString String first); + @Index(function = "at") public native @StdString BytePointer first(@Cast("size_t") long i); public native StringTensorVector first(@Cast("size_t") long i, BytePointer first); + @Index(function = "at") public native @ByRef Tensor second(@Cast("size_t") long i); public native StringTensorVector second(@Cast("size_t") long i, Tensor second); + @MemberSetter @Index(function = "at") public native StringTensorVector first(@Cast("size_t") long i, @StdString String first); - public StringTensorPairVector put(BytePointer[] firstValue, Tensor[] secondValue) { + public StringTensorVector put(BytePointer[] firstValue, Tensor[] secondValue) { for (int i = 0; i < firstValue.length && i < secondValue.length; i++) { first(i, firstValue[i]); second(i, secondValue[i]); @@ -45,7 +47,7 @@ public StringTensorPairVector put(BytePointer[] firstValue, Tensor[] secondValue return this; } - public StringTensorPairVector put(String[] firstValue, Tensor[] secondValue) { + public StringTensorVector put(String[] firstValue, Tensor[] secondValue) { for (int i = 0; i < firstValue.length && i < secondValue.length; i++) { first(i, firstValue[i]); second(i, secondValue[i]); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java index d9aaaba72b3..7a1bfd09fef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import 
org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTypePtr.java index b1397f1b1cf..f7c3f246765 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringValueMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringValueMap.java index bae383ab59d..dbefd7ff53f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringValueMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringValueMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringVector.java index 7918900e943..fdd41e48b0e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -35,6 +37,8 @@ public class StringVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public BytePointer front() { return get(0); } + public BytePointer back() { return get(size() - 1); } @Index(function = "at") public native @StdString BytePointer get(@Cast("size_t") long i); public native StringVector put(@Cast("size_t") long i, BytePointer value); @ValueSetter @Index(function = "at") public native StringVector put(@Cast("size_t") long i, @StdString String value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringVectorOptional.java index bc870f5743b..25711d2d691 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringVectorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 
1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class StringVectorOptional extends Pointer { public native @Name("operator =") @ByRef StringVectorOptional put(@ByRef StringVectorOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef StringVector get(); @ValueSetter public native StringVectorOptional put(@ByRef StringVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringView.java index 0e2ff222cda..f309d2040ae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringView.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringView.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -40,9 +42,12 @@ public class StringView extends Pointer { public native @Cast("const char*") BytePointer str(); - + private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef StringView dt); + public Pointer shiftLeft(Pointer os) { return shiftLeft(os, this); } - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef StringView lhs, @Const @ByRef StringView rhs); + public boolean equals(StringView rhs) { return equals(this, rhs); } - + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef StringView lhs, @Const @ByRef StringView rhs); + public boolean notEquals(StringView rhs) { return notEquals(this, rhs); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrongFunctionPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrongFunctionPtr.java deleted file mode 100644 index 2e350a1e62c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrongFunctionPtr.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// An owning pointer to a Function. Just a pair of a raw Function ptr and it's -// owning CU. We need this because pybind requires a ref-counted way to refer to -// Functions. 
-@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StrongFunctionPtr extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public StrongFunctionPtr(Pointer p) { super(p); } - - public StrongFunctionPtr(@SharedPtr CompilationUnit cu, Function function) { super((Pointer)null); allocate(cu, function); } - private native void allocate(@SharedPtr CompilationUnit cu, Function function); - public native @SharedPtr CompilationUnit cu_(); public native StrongFunctionPtr cu_(CompilationUnit setter); - public native Function function_(); public native StrongFunctionPtr function_(Function setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrongTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrongTypePtr.java index 3895e9d41f3..56c552ce667 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrongTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrongTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Subscript.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Subscript.java index 88e3e744cd7..edcdf10278c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Subscript.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Subscript.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,8 +21,15 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Subscript extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public Subscript(Pointer p) { super(p); } - public Subscript(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Subscript(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr value(); + public native @ByVal ExprList subscript_exprs(); + public static native @ByVal Subscript create( + @Const @ByRef SourceRange range, + @Const @ByRef Expr value, + @Const @ByRef ExprList subscript_exprs); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredEnumClass.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredEnumClass.java index bc682e20531..3d170582894 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredEnumClass.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredEnumClass.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,19 +24,19 @@ public class SugaredEnumClass extends SugaredValue { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SugaredEnumClass(Pointer p) { super(p); } - public SugaredEnumClass(@SharedPtr @ByVal EnumType enum_type) { super((Pointer)null); allocate(enum_type); } - private native void allocate(@SharedPtr @ByVal EnumType enum_type); + public SugaredEnumClass(@SharedPtr EnumType enum_type) { super((Pointer)null); allocate(enum_type); } + private native void allocate(@SharedPtr EnumType enum_type); public native @StdString BytePointer kind(); - public native @SharedPtr @ByVal SugaredValue attr( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue attr( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @StdString BytePointer field); - public native @SharedPtr @ByVal SugaredValue attr( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue attr( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @StdString String field); - public native @SharedPtr @ByVal SugaredValue iter(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue iter(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredTupleValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredTupleValue.java index 9fd4e2e8bfa..d2d89e20842 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredTupleValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredTupleValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,14 +24,14 @@ public class 
SugaredTupleValue extends SugaredValue { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SugaredTupleValue(Pointer p) { super(p); } - public SugaredTupleValue(@ByVal SugaredValueVector tup) { super((Pointer)null); allocate(tup); } - private native void allocate(@ByVal SugaredValueVector tup); + public SugaredTupleValue(@ByVal SharedSugaredValueVector tup) { super((Pointer)null); allocate(tup); } + private native void allocate(@ByVal SharedSugaredValueVector tup); - public native @ByVal SugaredValueVector asTuple( + public native @ByVal SharedSugaredValueVector asTuple( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @Const @ByRef(nullValue = "c10::optional<size_t>{}") SizeTOptional size_hint); - public native @ByVal SugaredValueVector asTuple( + public native @ByVal SharedSugaredValueVector asTuple( @Const @ByRef SourceRange loc, @ByRef GraphFunction m); @@ -37,12 +39,12 @@ public class SugaredTupleValue extends SugaredValue { public native @StdString BytePointer kind(); - public native @SharedPtr @ByVal SugaredValue getitem( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue getitem( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, Value idx, @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); - public native @SharedPtr @ByVal SugaredValue getitem( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue getitem( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, Value idx); @@ -50,12 +52,12 @@ public class SugaredTupleValue extends SugaredValue { // This function is called when a SugaredValue is used to convert a // SugaredValue to its iterator. For example, when iterating through a Dict we // iterate over its keys - public native @SharedPtr @ByVal SugaredValue iter(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue iter(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); // Because this is used to contain SugaredValues of Heterogenous types, // we define staticLen() so that when this is iterated over it is emitted // as an unrolled loop. public native @ByVal LongOptional staticLen(); - public native @ByRef SugaredValueVector tup_(); public native SugaredTupleValue tup_(SugaredValueVector setter); + public native @ByRef SharedSugaredValueVector tup_(); public native SugaredTupleValue tup_(SharedSugaredValueVector setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValue.java index 77f4c7ffd94..12d61bad555 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SugaredValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -38,11 +40,11 @@ public class SugaredValue extends Pointer { public native Value asValue(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); // select an attribute on it, e.g.
`this.field` - public native @SharedPtr @ByVal SugaredValue attr( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue attr( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @StdString BytePointer field); - public native @SharedPtr @ByVal SugaredValue attr( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue attr( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @StdString String field); @@ -70,34 +72,29 @@ public native void setAttr( // use it as a vector of values, e.g. a tuple of values as return value from // a method invocation - public native @ByVal SugaredValueVector asTuple( + public native @ByVal SharedSugaredValueVector asTuple( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @Const @ByRef(nullValue = "c10::optional<size_t>{}") SizeTOptional size_hint); - public native @ByVal SugaredValueVector asTuple( + public native @ByVal SharedSugaredValueVector asTuple( @Const @ByRef SourceRange loc, @ByRef GraphFunction m); // TODO @wconstab refactor to use ModuleValue::asTuple instead of new API - public native @SharedPtr @ByVal SugaredValue asTupleValue( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue asTupleValue( @Const @ByRef SourceRange loc, @ByRef GraphFunction m); - public native @ByVal SugaredValueVector asType( + public native @ByVal SharedSugaredValueVector asType( @Const @ByRef SourceRange loc, @ByRef Method m); // call it like a function, e.g. `outputs = this(inputs)` - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + // This function is called when to convert a SugaredValue to its iterator.
// For example, when iterating through a Dict we iterate over its keys - public native @SharedPtr @ByVal SugaredValue iter( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue iter( @Const @ByRef SourceRange loc, @ByRef GraphFunction m); @@ -118,12 +115,12 @@ public native void setAttr( public native Value len(@Const @ByRef SourceRange loc, @ByRef GraphFunction m); // expression for ith elemement for iterable value - public native @SharedPtr @ByVal SugaredValue getitem( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue getitem( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, Value idx, @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); - public native @SharedPtr @ByVal SugaredValue getitem( + public native @SharedPtr("torch::jit::SugaredValue") @ByVal SugaredValue getitem( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, Value idx); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java index 8231aa00177..b9f449dfc08 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,6 +21,8 @@ @Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class SymBool extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SymBool(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. 
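 * <p>Editorial sketch of the plain-boolean path through this class (the
 * symbolic path would start from a SymNode instead):
 * <pre>{@code
 * SymBool flag = new SymBool(true);
 * boolean b = flag.expect_bool();
 * }</pre>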
*/ public SymBool(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); @@ -31,8 +35,8 @@ public class SymBool extends Pointer { /*implicit*/ public SymBool(@Cast("bool") boolean b) { super((Pointer)null); allocate(b); } private native void allocate(@Cast("bool") boolean b); - public SymBool(@ByVal @Cast("c10::SymNode*") Pointer ptr) { super((Pointer)null); allocate(ptr); } - private native void allocate(@ByVal @Cast("c10::SymNode*") Pointer ptr); + public SymBool(@ByVal SymNode ptr) { super((Pointer)null); allocate(ptr); } + private native void allocate(@ByVal SymNode ptr); public SymBool() { super((Pointer)null); allocate(); } private native void allocate(); @@ -40,7 +44,7 @@ public class SymBool extends Pointer { - public native @ByVal @Cast("c10::SymNode*") Pointer toSymNodeImpl(); + public native @ByVal SymNode toSymNodeImpl(); public native @Cast("bool") boolean expect_bool(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVector.java index f0a61a7191c..743812c2047 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,8 +17,25 @@ import static org.bytedeco.pytorch.global.torch.*; + +/** This is a 'vector' (really, a variable-sized array), optimized + * for the case when the array is small. It contains some number of elements + * in-place, which allows it to avoid heap allocation when the actual number of + * elements is below that threshold. This allows normal "small" cases to be + * fast without losing generality for large inputs. + * + * \note + * In the absence of a well-motivated choice for the number of inlined + * elements \p N, it is recommended to use \c SmallVector<T> (that is, + * omitting the \p N). This will choose a default number of inlined + * elements reasonable for allocation on the stack (for example, trying to keep \c + * sizeof(SmallVector<T>) around 64 bytes). + * + * \warning This does not attempt to be exception safe. + * + * @see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h */ @Name("c10::SmallVector<c10::SymInt,at::kDimVectorStaticSize>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SymDimVector extends SymDimVectorImpl { +public class SymDimVector extends SymIntSmallVectorImpl { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
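 * <p>Editorial sketch tying this class to the SmallVector note above (dims
 * is assumed to be obtained from native code, e.g. a tensor's symbolic
 * sizes):
 * <pre>{@code
 * SymDimVector dims = ...;
 * dims.push_back(new SymInt(1)); // small sizes stay in inline storage
 * SymInt first = dims.at(0);
 * }</pre>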
*/ public SymDimVector(Pointer p) { super(p); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorOptional.java index 5ee6785b4bf..3afeedc854c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class SymDimVectorOptional extends Pointer { public native @Name("operator =") @ByRef SymDimVectorOptional put(@ByRef SymDimVectorOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef SymDimVector get(); @ValueSetter public native SymDimVectorOptional put(@ByRef SymDimVector value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java index b4e629c584c..29ba5c4e98f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloat.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,11 +22,13 @@ @Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class SymFloat extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
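+ * <p>Editorial sketch: wrapping a concrete double and reading it back.
+ * <pre>{@code
+ * SymFloat f = new SymFloat(0.5);
+ * double d = f.expect_float();
+ * }</pre>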
*/ + public SymFloat(Pointer p) { super(p); } /*implicit*/ public SymFloat(double d) { super((Pointer)null); allocate(d); } private native void allocate(double d); - public SymFloat(@ByVal @Cast("c10::SymNode*") Pointer ptr) { super((Pointer)null); allocate(ptr); } - private native void allocate(@ByVal @Cast("c10::SymNode*") Pointer ptr); + public SymFloat(@ByVal SymNode ptr) { super((Pointer)null); allocate(ptr); } + private native void allocate(@ByVal SymNode ptr); public SymFloat() { super((Pointer)null); allocate(); } private native void allocate(); @@ -32,7 +36,7 @@ public class SymFloat extends Pointer { - public native @ByVal @Cast("c10::SymNode*") Pointer toSymNodeImpl(); + public native @ByVal SymNode toSymNodeImpl(); public native double expect_float(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java index 5d4c9391394..6229f558c0a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java index 1cca672424b..93892d57206 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymInt.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ @Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class SymInt extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SymInt(Pointer p) { super(p); } public enum Unchecked { UNCHECKED(0); @@ -48,8 +52,8 @@ public enum Unchecked { private native void allocate(@Cast("int64_t") long d); public SymInt() { super((Pointer)null); allocate(); } private native void allocate(); - public SymInt(@ByVal @Cast("c10::SymNode*") Pointer n) { super((Pointer)null); allocate(n); } - private native void allocate(@ByVal @Cast("c10::SymNode*") Pointer n); + public SymInt(@ByVal SymNode n) { super((Pointer)null); allocate(n); } + private native void allocate(@ByVal SymNode n); // unchecked c-tor accepting raw `data_` // One appropriate use for this is when you are constructing a symint @@ -75,7 +79,7 @@ public enum Unchecked { - public native @ByVal @Cast("c10::SymNode*") Pointer toSymNodeImpl(); + public native @ByVal SymNode toSymNodeImpl(); // Require the int to be non-symbolic, and if it is symbolic raise an // error. 
This is safe to use for C++ code that doesn't work for symbolic @@ -116,8 +120,8 @@ public enum Unchecked { public native @ByVal SymBool sym_gt(@Const @ByRef SymInt arg0); public native @ByVal SymBool sym_ge(@Const @ByRef SymInt arg0); - - + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef SymInt o); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef SymInt o); public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef SymInt o); public native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef SymInt o); public native @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef SymInt o); @@ -128,8 +132,8 @@ public enum Unchecked { public native @ByVal @Name("operator *") SymInt multiply(@Cast("int64_t") long sci); public native @Cast("bool") @Name("operator <") boolean lessThan(@Cast("int64_t") long sci); - - + public native @Cast("bool") @Name("operator ==") boolean equals(@Cast("int64_t") long sci); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Cast("int64_t") long sci); public native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Cast("int64_t") long sci); public native @Cast("bool") @Name("operator >") boolean greaterThan(@Cast("int64_t") long sci); public native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Cast("int64_t") long sci); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRef.java similarity index 70% rename from pytorch/src/gen/java/org/bytedeco/pytorch/SymIntRef.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRef.java index 8bf3a3d69c7..a65b1b45ff5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,38 +18,37 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("c10::ArrayRef<c10::SymInt>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SymIntRef extends Pointer { +public class SymIntArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SymIntRef(Pointer p) { super(p); } + public SymIntArrayRef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SymIntRef(long size) { super((Pointer)null); allocateArray(size); } + public SymIntArrayRef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); - @Override public SymIntRef position(long position) { - return (SymIntRef)super.position(position); + @Override public SymIntArrayRef position(long position) { + return (SymIntArrayRef)super.position(position); } - @Override public SymIntRef getPointer(long i) { - return new SymIntRef((Pointer)this).offsetAddress(i); + @Override public SymIntArrayRef getPointer(long i) { + return new SymIntArrayRef((Pointer)this).offsetAddress(i); } /** \name Constructors * \{

* Construct an empty ArrayRef. */ - /* implicit */ public SymIntRef() { super((Pointer)null); allocate(); } + /* implicit */ public SymIntArrayRef() { super((Pointer)null); allocate(); } private native void allocate(); /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public SymIntRef(@Const @ByRef SymInt OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef SymInt OneElt); + /** Construct an ArrayRef from a pointer and length. */ - public SymIntRef(@Const SymInt data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + public SymIntArrayRef(@Const SymInt data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } private native void allocate(@Const SymInt data, @Cast("size_t") long length); /** Construct an ArrayRef from a range. */ - public SymIntRef(@Const SymInt begin, @Const SymInt end) { super((Pointer)null); allocate(begin, end); } + public SymIntArrayRef(@Const SymInt begin, @Const SymInt end) { super((Pointer)null); allocate(begin, end); } private native void allocate(@Const SymInt begin, @Const SymInt end); /** Construct an ArrayRef from a SmallVector. This is templated in order to @@ -58,6 +59,8 @@ public class SymIntRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector<bool>, because ArrayRef can't work on a std::vector<bool> // bitfield. + public SymIntArrayRef(@ByRef SymIntVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef SymIntVector vec); /** Construct an ArrayRef from a std::array */ @@ -70,13 +73,13 @@ public class SymIntRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef<c10::SymInt>::iterator*") SymInt begin(); - public native @ByVal @Cast("const c10::ArrayRef<c10::SymInt>::iterator*") SymInt end(); + public native @Const @ByPtr SymInt begin(); + public native @Const @ByPtr SymInt end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef<c10::SymInt>::const_iterator*") SymInt cbegin(); - public native @ByVal @Cast("const c10::ArrayRef<c10::SymInt>::const_iterator*") SymInt cend(); + public native @Const @ByPtr SymInt cbegin(); + public native @Const @ByPtr SymInt cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); @@ -93,13 +96,13 @@ public class SymIntRef extends Pointer { public native @Const @ByRef SymInt back(); /** equals - Check for element-wise equality. */ - public native @Cast("const bool") boolean equals(@ByVal SymIntRef RHS); + public native @Cast("const bool") boolean equals(@ByVal SymIntArrayRef RHS); /** slice(n, m) - Take M elements of the array starting at element N */ - public native @Const @ByVal SymIntRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + public native @Const @ByVal SymIntArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array.
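 * <p>Editorial sketch of the slice operations (data is assumed to point at
 * four SymInt values owned elsewhere):
 * <pre>{@code
 * SymIntArrayRef ref = new SymIntArrayRef(data, 4);
 * SymIntArrayRef tail = ref.slice(1);    // drop the first element
 * SymIntArrayRef mid = ref.slice(1, 2);  // two elements starting at index 1
 * }</pre>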
*/ - public native @Const @ByVal SymIntRef slice(@Cast("size_t") long N); + public native @Const @ByVal SymIntArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRefOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRefOptional.java index 3f7fdae44b5..a6bfcd35e7c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRefOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntArrayRefOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,18 +17,19 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("c10::optional<c10::SymIntArrayRef>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@NoOffset @Name("c10::optional<c10::ArrayRef<c10::SymInt> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class SymIntArrayRefOptional extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public SymIntArrayRefOptional(Pointer p) { super(p); } - public SymIntArrayRefOptional(SymIntRef value) { this(); put(value); } + public SymIntArrayRefOptional(SymIntArrayRef value) { this(); put(value); } public SymIntArrayRefOptional() { allocate(); } private native void allocate(); public native @Name("operator =") @ByRef SymIntArrayRefOptional put(@ByRef SymIntArrayRefOptional x); public native boolean has_value(); - public native @Name("value") @ByRef SymIntRef get(); - @ValueSetter public native SymIntArrayRefOptional put(@ByRef SymIntRef value); + public native void reset(); + public native @Name("value") @ByRef SymIntArrayRef get(); + @ValueSetter public native SymIntArrayRefOptional put(@ByRef SymIntArrayRef value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptional.java index d070c7feb5f..33e3b0359c6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class SymIntOptional extends Pointer { public native @Name("operator =") @ByRef SymIntOptional put(@ByRef SymIntOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef SymInt get(); @ValueSetter public native SymIntOptional put(@ByRef SymInt value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmallVectorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorBase.java similarity index 72% rename from pytorch/src/gen/java/org/bytedeco/pytorch/SmallVectorBase.java rename to
pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorBase.java index 84423b3f14a..c89884fae2a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmallVectorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,13 +29,13 @@ * * XXX: if build fails here fall back to C10_IS_TRIVIALLY_COPYABLE and make a * note */ -@Name("c10::SmallVectorTemplateBase<int64_t>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SmallVectorBase extends Pointer { +@Name("c10::SmallVectorTemplateBase<c10::SymInt>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SymIntSmallVectorBase extends SymIntSmallVectorCommon { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SmallVectorBase(Pointer p) { super(p); } + public SymIntSmallVectorBase(Pointer p) { super(p); } - public native void push_back(@Cast("const int64_t") long Elt); + public native void push_back(@Const @ByRef SymInt Elt); public native void pop_back(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorCommon.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorCommon.java new file mode 100644 index 00000000000..12a61041dc2 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorCommon.java @@ -0,0 +1,53 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** This is the part of SmallVectorTemplateBase which does not depend on whether + * the type T is a POD. The extra dummy template argument is used by ArrayRef + * to avoid unnecessarily requiring T to be complete. */ +@Name("c10::SmallVectorTemplateCommon<c10::SymInt>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SymIntSmallVectorCommon extends IntSizedSmallVectorBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SymIntSmallVectorCommon(Pointer p) { super(p); } + + + // forward iterator creation methods. + public native @ByVal @Cast("c10::SmallVectorTemplateCommon<c10::SymInt>::iterator*") SymInt begin(); + public native @ByVal @Cast("c10::SmallVectorTemplateCommon<c10::SymInt>::iterator*") SymInt end(); + + // reverse iterator creation methods. + + public native long size_in_bytes(); + public native long max_size(); + + public native @Cast("size_t") long capacity_in_bytes(); + + /** Return a pointer to the vector's buffer, even if empty().
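+ * <p>Editorial sketch of element access (v is assumed to arrive from native
+ * code, e.g. as a SymDimVector):
+ * <pre>{@code
+ * SymInt first = v.front();
+ * SymInt third = v.at(2);
+ * }</pre>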
+ public native @ByVal @Cast("c10::SmallVectorTemplateCommon<c10::SymInt>::pointer*") SymInt data(); + /** Return a pointer to the vector's buffer, even if empty(). */ + + // SmallVector::at is NOT from LLVM. + public native @ByVal SymInt at(long idx); + public native @Name("operator []") @ByVal SymInt get(long idx); + + public native @ByVal SymInt front(); + + public native @ByVal SymInt back(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorImpl.java similarity index 62% rename from pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorImpl.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorImpl.java index 33992e56603..c7ce7199c0d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymDimVectorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntSmallVectorImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,11 +17,14 @@ import static org.bytedeco.pytorch.global.torch.*; + +/** This class consists of common code factored out of the SmallVector class to + * reduce code duplication based on the SmallVector 'N' template parameter. */ @Name("c10::SmallVectorImpl<c10::SymInt>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SymDimVectorImpl extends SymSmallVectorBase { +public class SymIntSmallVectorImpl extends SymIntSmallVectorBase { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SymDimVectorImpl(Pointer p) { super(p); } + public SymIntSmallVectorImpl(Pointer p) { super(p); } @@ -29,7 +34,7 @@ public class SymDimVectorImpl extends SymSmallVectorBase { /** Like resize, but \ref T is POD, the new values won't be initialized. */ public native void resize_for_overwrite(long N); - public native void resize(long N, long NV); + public native void resize(long N, @ByVal SymInt NV); public native void reserve(long N); @@ -37,33 +42,33 @@ public class SymDimVectorImpl extends SymSmallVectorBase { public native @ByVal SymInt pop_back_val(); - public native void swap(@ByRef SymDimVectorImpl RHS); + public native void swap(@ByRef SymIntSmallVectorImpl RHS); /** Add the specified range to the end of the SmallVector. */ /** Append \p NumInputs copies of \p Elt to the end. */ - public native void append(long NumInputs, long Elt); + public native void append(long NumInputs, @ByVal SymInt Elt); - public native void append(@Const @ByRef SymDimVectorImpl RHS); + public native void append(@Const @ByRef SymIntSmallVectorImpl RHS); - public native void assign(long NumElts, long Elt); + public native void assign(long NumElts, @ByVal SymInt Elt); // FIXME: Consider assigning over existing elements, rather than clearing & // re-initializing them - for all assign(...) variants.
- public native void assign(@Const @ByRef SymDimVectorImpl RHS); + public native void assign(@Const @ByRef SymIntSmallVectorImpl RHS); public native @ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::iterator*") SymInt erase(@ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::const_iterator*") SymInt CI); public native @ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::iterator*") SymInt erase(@ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::const_iterator*") SymInt CS, @ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::const_iterator*") SymInt CE); public native @ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::iterator*") SymInt insert(@ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::iterator*") SymInt I, @ByRef(true) SymInt Elt); - public native @ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::iterator*") SymInt insert(@ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::iterator*") SymInt I, long NumToInsert, long Elt); + public native @ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::iterator*") SymInt insert(@ByVal @Cast("c10::SmallVectorImpl<c10::SymInt>::iterator*") SymInt I, long NumToInsert, @ByVal SymInt Elt); - public native @ByRef @Name("operator =") SymDimVectorImpl put(@Const @ByRef SymDimVectorImpl RHS); + public native @ByRef @Name("operator =") SymIntSmallVectorImpl put(@Const @ByRef SymIntSmallVectorImpl RHS); - - + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef SymIntSmallVectorImpl RHS); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef SymIntSmallVectorImpl RHS); - public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef SymDimVectorImpl RHS); + public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef SymIntSmallVectorImpl RHS); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java index db958b7a631..6d40c4ea877 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntVector.java index 65cda3e4169..c16dba8220e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class SymIntVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public SymInt front() { return get(0); } + public SymInt back() { return get(size() - 1); } @Index(function = "at") public native @ByRef SymInt get(@Cast("size_t") long i); public native SymIntVector put(@Cast("size_t") long
i, SymInt value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java new file mode 100644 index 00000000000..934d8d18cfd --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr<c10::SymNodeImpl>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SymNode extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SymNode(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public SymNode(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public SymNode position(long position) { + return (SymNode)super.position(position); + } + @Override public SymNode getPointer(long i) { + return new SymNode((Pointer)this).offsetAddress(i); + } + + + public SymNode() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public SymNode(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. + // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public SymNode(SymNodeImpl target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(SymNodeImpl target, @ByVal DontIncreaseRefcount arg1); + + + + public SymNode(@ByRef(true) SymNode rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) SymNode rhs); + + public native @ByRef @Name("operator =") @NoException(true) SymNode put(@ByRef(true) SymNode rhs); + + public native @NoException(true) SymNodeImpl get(); + + public native @ByRef @Name("operator *") @NoException(true) SymNodeImpl multiply(); + + public native @Name("operator ->") @NoException(true) SymNodeImpl access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef SymNode rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid.
That means the refcount is not decreased. + * You *must* put the returned pointer back into an intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) SymNodeImpl release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counterpart to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal SymNode reclaim(SymNodeImpl owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal SymNode reclaim_copy(SymNodeImpl owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside an intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...)) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal SymNode unsafe_steal_from_new(SymNodeImpl raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal SymNode unsafe_adapt_non_heap_allocated( + SymNodeImpl raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** into an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new.
+ * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal SymNode unsafe_reclaim_from_nonowning(SymNodeImpl raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeArrayRef.java similarity index 56% rename from pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeRef.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeArrayRef.java index 5e7b9a34974..0a1871f5b0b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,37 +18,38 @@ import static org.bytedeco.pytorch.global.torch.*; @Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SymNodeRef extends Pointer { +public class SymNodeArrayRef extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SymNodeArrayRef(Pointer p) { super(p); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SymNodeRef(long size) { super((Pointer)null); allocateArray(size); } + public SymNodeArrayRef(long size) { super((Pointer)null); allocateArray(size); } private native void allocateArray(long size); - @Override public SymNodeRef position(long position) { - return (SymNodeRef)super.position(position); + @Override public SymNodeArrayRef position(long position) { + return (SymNodeArrayRef)super.position(position); } - @Override public SymNodeRef getPointer(long i) { - return new SymNodeRef((Pointer)this).offsetAddress(i); + @Override public SymNodeArrayRef getPointer(long i) { + return new SymNodeArrayRef((Pointer)this).offsetAddress(i); } /** \name Constructors * \{
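The release()/reclaim() contract above is easy to misuse from Java, so here is a minimal sketch of the round trip. It is not part of the generated bindings; the class and method names are hypothetical, and `node` is assumed to be a live SymNode obtained elsewhere.

import org.bytedeco.pytorch.SymNode;
import org.bytedeco.pytorch.SymNodeImpl;

public class SymNodeOwnershipSketch {
    static SymNode roundTrip(SymNode node) {
        SymNodeImpl raw = node.release(); // owning raw pointer; the refcount is NOT decreased
        // ... `raw` may now cross a C-style API boundary ...
        return SymNode.reclaim(raw);      // takes ownership back so destruction happens normally
    }
}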

* Construct an empty ArrayRef. */ - /* implicit */ public SymNodeRef() { super((Pointer)null); allocate(); } + /* implicit */ public SymNodeArrayRef() { super((Pointer)null); allocate(); } private native void allocate(); /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public SymNodeRef(@Cast("const c10::SymNode*") @ByRef Pointer OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Cast("const c10::SymNode*") @ByRef Pointer OneElt); + /** Construct an ArrayRef from a pointer and length. */ - public SymNodeRef(@Cast("const c10::SymNode*") Pointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } - private native void allocate(@Cast("const c10::SymNode*") Pointer data, @Cast("size_t") long length); + public SymNodeArrayRef(@Const SymNode data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Const SymNode data, @Cast("size_t") long length); /** Construct an ArrayRef from a range. */ - public SymNodeRef(@Cast("const c10::SymNode*") Pointer begin, @Cast("const c10::SymNode*") Pointer end) { super((Pointer)null); allocate(begin, end); } - private native void allocate(@Cast("const c10::SymNode*") Pointer begin, @Cast("const c10::SymNode*") Pointer end); + public SymNodeArrayRef(@Const SymNode begin, @Const SymNode end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Const SymNode begin, @Const SymNode end); /** Construct an ArrayRef from a SmallVector. This is templated in order to * avoid instantiating SmallVectorTemplateCommon whenever we @@ -68,46 +71,46 @@ public class SymNodeRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Pointer begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Pointer end(); + public native @Const @ByPtr SymNode begin(); + public native @Const @ByPtr SymNode end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Pointer cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Pointer cend(); + public native @Const @ByPtr SymNode cbegin(); + public native @Const @ByPtr SymNode cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); - public native @Cast("const c10::SymNode*") Pointer data(); + public native @Const SymNode data(); /** size - Get the array size. */ public native @Cast("const size_t") long size(); /** front - Get the first element. */ - public native @Cast("const c10::SymNode*") @ByRef Pointer front(); + public native @Const @ByRef SymNode front(); /** back - Get the last element. */ - public native @Cast("const c10::SymNode*") @ByRef Pointer back(); + public native @Const @ByRef SymNode back(); /** equals - Check for element-wise equality. */ - public native @Cast("const bool") boolean equals(@ByVal SymNodeRef RHS); + public native @Cast("const bool") boolean equals(@ByVal SymNodeArrayRef RHS); /** slice(n, m) - Take M elements of the array starting at element N */ - public native @Const @ByVal SymNodeRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + public native @Const @ByVal SymNodeArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); /** slice(n) - Chop off the first N elements of the array. 
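As a usage note (a sketch, not from the patch): an ArrayRef is a non-owning view, so the storage behind it must outlive every view and sub-view. The helper name below is hypothetical.

import org.bytedeco.pytorch.SymNode;
import org.bytedeco.pytorch.SymNodeArrayRef;

public class SymNodeArrayRefSketch {
    static SymNodeArrayRef dropFirst(SymNodeArrayRef view) {
        // slice(1) chops off the first element; the result aliases the same
        // storage as `view`, which must stay alive (ArrayRef never owns).
        return view.empty() ? view : view.slice(1);
    }
}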
*/ - public native @Const @ByVal SymNodeRef slice(@Cast("size_t") long N); + public native @Const @ByVal SymNodeArrayRef slice(@Cast("size_t") long N); /** \} * \name Operator Overloads * \{ */ - public native @Cast("const c10::SymNode*") @ByRef @Name("operator []") Pointer get(@Cast("size_t") long Index); + public native @Const @ByRef @Name("operator []") SymNode get(@Cast("size_t") long Index); /** Vector compatibility */ /// - public native @Cast("const c10::SymNode*") @ByRef Pointer at(@Cast("size_t") long Index); + public native @Const @ByRef SymNode at(@Cast("size_t") long Index); /** Disallow accidental assignment from a temporary. * @@ -124,7 +127,7 @@ public class SymNodeRef extends Pointer { /** \} * \name Expensive Operations * \{ */ - public native @Cast("c10::SymNode*") @StdVector Pointer vec(); + public native @StdVector SymNode vec(); /** \} */ } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java index 9a146c5016e..38eecb05c4b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNodeImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,36 +41,36 @@ public class SymNodeImpl extends Pointer { public native @Cast("bool") boolean is_int(); public native @Cast("bool") boolean is_bool(); public native @Cast("bool") boolean is_float(); - public native @ByVal @Cast("c10::SymNode*") Pointer add(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer sub(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer mul(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer truediv(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer pow(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer floordiv(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer mod(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer eq(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer ne(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer gt(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer lt(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer le(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer ge(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer ceil(); - public native @ByVal @Cast("c10::SymNode*") Pointer floor(); - public native @ByVal @Cast("c10::SymNode*") Pointer neg(); - public native @ByVal @Cast("c10::SymNode*") Pointer sym_min(@Cast("const 
c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer sym_max(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer sym_or(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer sym_and(@Cast("const c10::SymNode*") @ByRef Pointer other); - public native @ByVal @Cast("c10::SymNode*") Pointer sym_not(); + public native @ByVal SymNode add(@Const @ByRef SymNode other); + public native @ByVal SymNode sub(@Const @ByRef SymNode other); + public native @ByVal SymNode mul(@Const @ByRef SymNode other); + public native @ByVal SymNode truediv(@Const @ByRef SymNode other); + public native @ByVal SymNode pow(@Const @ByRef SymNode other); + public native @ByVal SymNode floordiv(@Const @ByRef SymNode other); + public native @ByVal SymNode mod(@Const @ByRef SymNode other); + public native @ByVal SymNode eq(@Const @ByRef SymNode other); + public native @ByVal SymNode ne(@Const @ByRef SymNode other); + public native @ByVal SymNode gt(@Const @ByRef SymNode other); + public native @ByVal SymNode lt(@Const @ByRef SymNode other); + public native @ByVal SymNode le(@Const @ByRef SymNode other); + public native @ByVal SymNode ge(@Const @ByRef SymNode other); + public native @ByVal SymNode ceil(); + public native @ByVal SymNode floor(); + public native @ByVal SymNode neg(); + public native @ByVal SymNode sym_min(@Const @ByRef SymNode other); + public native @ByVal SymNode sym_max(@Const @ByRef SymNode other); + public native @ByVal SymNode sym_or(@Const @ByRef SymNode other); + public native @ByVal SymNode sym_and(@Const @ByRef SymNode other); + public native @ByVal SymNode sym_not(); // NB: self is ignored here, only the arguments are used - public native @ByVal @Cast("c10::SymNode*") Pointer is_non_overlapping_and_dense( - @ByVal SymNodeRef sizes, - @ByVal SymNodeRef strides); - public native @ByVal @Cast("c10::SymNode*") Pointer clone(); - public native @ByVal @Cast("c10::SymNode*") Pointer sym_float(); - public native @ByVal @Cast("c10::SymNode*") Pointer wrap_int(@Cast("int64_t") long num); - public native @ByVal @Cast("c10::SymNode*") Pointer wrap_float(double num); - public native @ByVal @Cast("c10::SymNode*") Pointer wrap_bool(@Cast("bool") boolean num); + public native @ByVal SymNode is_non_overlapping_and_dense( + @ByVal SymNodeArrayRef sizes, + @ByVal SymNodeArrayRef strides); + public native @ByVal SymNode clone(); + public native @ByVal SymNode sym_float(); + public native @ByVal SymNode wrap_int(@Cast("int64_t") long num); + public native @ByVal SymNode wrap_float(double num); + public native @ByVal SymNode wrap_bool(@Cast("bool") boolean num); public native @Cast("int64_t") long guard_int(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); public native @Cast("int64_t") long guard_int(String file, @Cast("int64_t") long line); public native @Cast("bool") boolean guard_bool(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); @@ -78,5 +80,5 @@ public class SymNodeImpl extends Pointer { public native @Cast("int64_t") long int_(); public native @Cast("bool") boolean bool_(); public native @StdString BytePointer str(); - + public native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Symbol.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Symbol.java index cbc50e275d8..4aecb342ea5 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/Symbol.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Symbol.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolArrayRef.java index dc48622f488..18c9f6f084c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class SymbolArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public SymbolArrayRef(@Const @ByRef Symbol OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef Symbol OneElt); + /** Construct an ArrayRef from a pointer and length. */ public SymbolArrayRef(@Const Symbol data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -58,6 +59,8 @@ public class SymbolArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. + public SymbolArrayRef(@ByRef SymbolVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef SymbolVector vec); /** Construct an ArrayRef from a std::array */ @@ -70,13 +73,13 @@ public class SymbolArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Symbol begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Symbol end(); + public native @Const @ByPtr Symbol begin(); + public native @Const @ByPtr Symbol end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Symbol cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Symbol cend(); + public native @Const @ByPtr Symbol cbegin(); + public native @Const @ByPtr Symbol cend(); /** empty - Check if the array is empty. 
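The SymbolVector constructor added above makes it easier to build a SymbolArrayRef without juggling raw pointers. A sketch under the usual JavaCPP conventions (the helper name is hypothetical; the vector owns the storage and must outlive the view):

import org.bytedeco.pytorch.SymbolArrayRef;
import org.bytedeco.pytorch.SymbolVector;

public class SymbolArrayRefSketch {
    static SymbolArrayRef viewOf(SymbolVector syms) {
        // Wraps the vector's contiguous storage without copying;
        // this constructor is the one added in this change.
        return new SymbolArrayRef(syms);
    }
}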
*/ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolHash.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolHash.java deleted file mode 100644 index 70fe2f0ad06..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolHash.java +++ /dev/null @@ -1,39 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - // namespace c10 - -// make symbol behave like an integer in hash tables -@Name("std::hash") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SymbolHash extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public SymbolHash() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public SymbolHash(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SymbolHash(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public SymbolHash position(long position) { - return (SymbolHash)super.position(position); - } - @Override public SymbolHash getPointer(long i) { - return new SymbolHash((Pointer)this).offsetAddress(i); - } - - public native @Cast("std::size_t") @Name("operator ()") long apply(@ByVal Symbol s); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolSet.java index 80069d52caf..d7d0d54d805 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolSet.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,6 +29,7 @@ public class SymbolSet extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public Symbol front() { try (Iterator it = begin()) { return it.get(); } } public native void insert(@ByRef Symbol value); public native void erase(@ByRef Symbol value); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolVector.java index 5d0b3b4b3ae..d3a7139df17 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class SymbolVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public Symbol front() { return get(0); } + public Symbol back() { return get(size() - 1); } @Index(function = "at") public native @ByRef Symbol get(@Cast("size_t") long i); public native SymbolVector put(@Cast("size_t") long i, Symbol value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java index b6bb0df6178..1d9450851e2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShape.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -50,8 +52,8 @@ public class SymbolicShape extends Pointer { public SymbolicShape(@ByVal ShapeSymbolVector dims) { super((Pointer)null); allocate(dims); } private native void allocate(@ByVal ShapeSymbolVector dims); - public SymbolicShape(@ByVal @Cast("c10::ArrayRef*") LongArrayRef dims) { super((Pointer)null); allocate(dims); } - private native void allocate(@ByVal @Cast("c10::ArrayRef*") LongArrayRef dims); + public SymbolicShape(@ByVal LongArrayRef dims) { super((Pointer)null); allocate(dims); } + private native void allocate(@ByVal LongArrayRef dims); public SymbolicShape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims) { super((Pointer)null); allocate(dims); } private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); @@ -77,7 +79,9 @@ public class SymbolicShape extends Pointer { // result will be unranked. 
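For instance (a sketch, not from the patch), merging a shape with an identically ranked shape keeps it ranked, and the operator == binding added below makes the result checkable from Java; the variable names are hypothetical.

import org.bytedeco.pytorch.SymbolicShape;

public class SymbolicShapeSketch {
    static boolean mergeDemo() {
        SymbolicShape a = new SymbolicShape(2, 3);
        SymbolicShape b = new SymbolicShape(2, 3);
        SymbolicShape merged = a.merge(b); // dimensions agree, so the result stays ranked
        return merged.equals(a);           // uses the operator == binding added below
    }
}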
public native @ByVal SymbolicShape merge(@Const @ByRef SymbolicShape other); - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef SymbolicShape lhs, @Const @ByRef SymbolicShape rhs); + public boolean equals(SymbolicShape rhs) { return equals(this, rhs); } - + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef SymbolicShape lhs, @Const @ByRef SymbolicShape rhs); + public boolean notEquals(SymbolicShape rhs) { return notEquals(this, rhs); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_DataPtrSizeT_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_DataPtrSizeT_T.java new file mode 100644 index 00000000000..8d5262a6107 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_DataPtrSizeT_T.java @@ -0,0 +1,34 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_DataPtrSizeT_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public T_DataPtrSizeT_T(Pointer p) { super(p); } + public T_DataPtrSizeT_T() { allocate(); } + private native void allocate(); + + + public @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr get0(@ByRef T_DataPtrSizeT_T container); + public @Cast("size_t") long get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @Cast("size_t") long get1(@ByRef T_DataPtrSizeT_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_DoubleLong_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_DoubleLong_T.java new file mode 100644 index 00000000000..31f86db357c --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_DoubleLong_T.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_DoubleLong_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public T_DoubleLong_T(Pointer p) { super(p); } + public T_DoubleLong_T(double value0, @Cast("int64_t") long value1) { allocate(value0, value1); } + private native void allocate(double value0, @Cast("int64_t") long value1); + public T_DoubleLong_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_DoubleLong_T put(@ByRef T_DoubleLong_T x); + + public double get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native double get0(@ByRef T_DoubleLong_T container); + public @Cast("int64_t") long get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @Cast("int64_t") long get1(@ByRef T_DoubleLong_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_IntInt_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_IntInt_T.java new file mode 100644 index 00000000000..89f88fb8c5d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_IntInt_T.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_IntInt_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
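These T_..._T classes all follow the same std::tuple mapping: a value constructor plus std::get&lt;i&gt; exposed as get0(), get1(), and so on. A sketch with the (double, int64_t) tuple defined above (values and helper name are hypothetical):

import org.bytedeco.pytorch.T_DoubleLong_T;

public class TupleAccessSketch {
    static double sumDemo() {
        T_DoubleLong_T pair = new T_DoubleLong_T(0.5, 42L); // wraps std::tuple construction
        double d = pair.get0(); // std::get<0>
        long l = pair.get1();   // std::get<1>
        return d + l;
    }
}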
*/ + public T_IntInt_T(Pointer p) { super(p); } + public T_IntInt_T(int value0, int value1) { allocate(value0, value1); } + private native void allocate(int value0, int value1); + public T_IntInt_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_IntInt_T put(@ByRef T_IntInt_T x); + + public int get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native int get0(@ByRef T_IntInt_T container); + public int get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native int get1(@ByRef T_IntInt_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_LongLong_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_LongLong_T.java new file mode 100644 index 00000000000..c1add8433fe --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_LongLong_T.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_LongLong_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public T_LongLong_T(Pointer p) { super(p); } + public T_LongLong_T(@Cast("int64_t") long value0, @Cast("int64_t") long value1) { allocate(value0, value1); } + private native void allocate(@Cast("int64_t") long value0, @Cast("int64_t") long value1); + public T_LongLong_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_LongLong_T put(@ByRef T_LongLong_T x); + + public @Cast("int64_t") long get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @Cast("int64_t") long get0(@ByRef T_LongLong_T container); + public @Cast("int64_t") long get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @Cast("int64_t") long get1(@ByRef T_LongLong_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequenceTensorTensorTupleTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceT_TensorTensor_T_T.java similarity index 58% rename from pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequenceTensorTensorTupleTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceT_TensorTensor_T_T.java index b6457ba5bf3..c7f2efa20f6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequenceTensorTensorTupleTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceT_TensorTensor_T_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; 
import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,14 +17,14 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PackedSequenceTensorTensorTupleTuple extends Pointer { +@NoOffset @Name("std::tuple >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_PackedSequenceT_TensorTensor_T_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PackedSequenceTensorTensorTupleTuple(Pointer p) { super(p); } + public T_PackedSequenceT_TensorTensor_T_T(Pointer p) { super(p); } public @ByRef PackedSequence get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef PackedSequence get0(@ByRef PackedSequenceTensorTensorTupleTuple container); - public @ByRef TensorTensorTuple get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef TensorTensorTuple get1(@ByRef PackedSequenceTensorTensorTupleTuple container); + @Namespace @Name("std::get<0>") public static native @ByRef PackedSequence get0(@ByRef T_PackedSequenceT_TensorTensor_T_T container); + public @ByRef T_TensorTensor_T get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @ByRef T_TensorTensor_T get1(@ByRef T_PackedSequenceT_TensorTensor_T_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequenceTensorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceTensor_T.java similarity index 65% rename from pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequenceTensorTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceTensor_T.java index 31407b3e0ce..aacc7c4a7ee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PackedSequenceTensorTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_PackedSequenceTensor_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,14 +17,14 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PackedSequenceTensorTuple extends Pointer { +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_PackedSequenceTensor_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
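Nested tuples compose the same way. A sketch (not from the patch) assuming `out` is a T_PackedSequenceT_TensorTensor_T_T returned by some RNN binding, and that T_TensorTensor_T follows the same get0()/get1() pattern:

import org.bytedeco.pytorch.PackedSequence;
import org.bytedeco.pytorch.T_PackedSequenceT_TensorTensor_T_T;
import org.bytedeco.pytorch.T_TensorTensor_T;
import org.bytedeco.pytorch.Tensor;

public class PackedSequenceUnpackSketch {
    static Tensor hiddenOf(T_PackedSequenceT_TensorTensor_T_T out) {
        PackedSequence seq = out.get0();     // the packed output sequence
        T_TensorTensor_T state = out.get1(); // the nested (hidden, cell) tuple
        Tensor h = state.get0();
        Tensor c = state.get1();
        return h;
    }
}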
*/ - public PackedSequenceTensorTuple(Pointer p) { super(p); } + public T_PackedSequenceTensor_T(Pointer p) { super(p); } public @ByRef PackedSequence get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef PackedSequence get0(@ByRef PackedSequenceTensorTuple container); + @Namespace @Name("std::get<0>") public static native @ByRef PackedSequence get0(@ByRef T_PackedSequenceTensor_T container); public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef PackedSequenceTensorTuple container); + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_PackedSequenceTensor_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringLong_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringLong_T.java new file mode 100644 index 00000000000..c178108ee3f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringLong_T.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_StringLong_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public T_StringLong_T(Pointer p) { super(p); } + public T_StringLong_T(@StdString BytePointer value0, @Cast("uint64_t") long value1) { allocate(value0, value1); } + private native void allocate(@StdString BytePointer value0, @Cast("uint64_t") long value1); + public T_StringLong_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_StringLong_T put(@ByRef T_StringLong_T x); + + public @StdString BytePointer get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @StdString BytePointer get0(@ByRef T_StringLong_T container); + public @Cast("uint64_t") long get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @Cast("uint64_t") long get1(@ByRef T_StringLong_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTSizeTTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_T.java similarity index 61% rename from pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTSizeTTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_T.java index 856042479e7..1f7d9d608e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTSizeTTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,21 +18,21 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringSizeTSizeTTuple extends Pointer { +public class T_StringSizeTSizeT_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public StringSizeTSizeTTuple(Pointer p) { super(p); } - public StringSizeTSizeTTuple(@StdString BytePointer value0, @Cast("size_t") long value1, @Cast("size_t") long value2) { allocate(value0, value1, value2); } + public T_StringSizeTSizeT_T(Pointer p) { super(p); } + public T_StringSizeTSizeT_T(@StdString BytePointer value0, @Cast("size_t") long value1, @Cast("size_t") long value2) { allocate(value0, value1, value2); } private native void allocate(@StdString BytePointer value0, @Cast("size_t") long value1, @Cast("size_t") long value2); - public StringSizeTSizeTTuple() { allocate(); } + public T_StringSizeTSizeT_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef StringSizeTSizeTTuple put(@ByRef StringSizeTSizeTTuple x); + public native @Name("operator =") @ByRef T_StringSizeTSizeT_T put(@ByRef T_StringSizeTSizeT_T x); public @StdString BytePointer get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @StdString BytePointer get0(@ByRef StringSizeTSizeTTuple container); + @Namespace @Name("std::get<0>") public static native @StdString BytePointer get0(@ByRef T_StringSizeTSizeT_T container); public @Cast("size_t") long get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @Cast("size_t") long get1(@ByRef StringSizeTSizeTTuple container); + @Namespace @Name("std::get<1>") public static native @Cast("size_t") long get1(@ByRef T_StringSizeTSizeT_T container); public @Cast("size_t") long get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @Cast("size_t") long get2(@ByRef StringSizeTSizeTTuple container); + @Namespace @Name("std::get<2>") public static native @Cast("size_t") long get2(@ByRef T_StringSizeTSizeT_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTSizeTTupleOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_TOptional.java similarity index 51% rename from pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTSizeTTupleOptional.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_TOptional.java index 8f06fd45ee5..18b16499302 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringSizeTSizeTTupleOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_StringSizeTSizeT_TOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,17 +18,18 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class StringSizeTSizeTTupleOptional extends Pointer { +public class T_StringSizeTSizeT_TOptional extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public StringSizeTSizeTTupleOptional(Pointer p) { super(p); } - public StringSizeTSizeTTupleOptional(StringSizeTSizeTTuple value) { this(); put(value); } - public StringSizeTSizeTTupleOptional() { allocate(); } + public T_StringSizeTSizeT_TOptional(Pointer p) { super(p); } + public T_StringSizeTSizeT_TOptional(T_StringSizeTSizeT_T value) { this(); put(value); } + public T_StringSizeTSizeT_TOptional() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef StringSizeTSizeTTupleOptional put(@ByRef StringSizeTSizeTTupleOptional x); + public native @Name("operator =") @ByRef T_StringSizeTSizeT_TOptional put(@ByRef T_StringSizeTSizeT_TOptional x); public native boolean has_value(); - public native @Name("value") @ByRef StringSizeTSizeTTuple get(); - @ValueSetter public native StringSizeTSizeTTupleOptional put(@ByRef StringSizeTSizeTTuple value); + public native void reset(); + public native @Name("value") @ByRef T_StringSizeTSizeT_T get(); + @ValueSetter public native T_StringSizeTSizeT_TOptional put(@ByRef T_StringSizeTSizeT_T value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java similarity index 60% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java index e8b60fb8583..0ce67e3ba4b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,21 +18,21 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("std::tuple,c10::MaybeOwned,c10::MaybeOwned >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple extends Pointer { +public class T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
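This change also adds reset() to the optional wrapper above. A sketch of the usual check-then-read pattern (the values and helper name are hypothetical):

import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.pytorch.T_StringSizeTSizeT_T;
import org.bytedeco.pytorch.T_StringSizeTSizeT_TOptional;

public class OptionalTupleSketch {
    static void optionalDemo() {
        T_StringSizeTSizeT_TOptional opt = new T_StringSizeTSizeT_TOptional();
        boolean present = opt.has_value(); // false: default-constructed optional is empty
        opt.put(new T_StringSizeTSizeT_T(new BytePointer("token"), 0L, 5L));
        T_StringSizeTSizeT_T value = opt.get(); // value() on the underlying c10::optional
        opt.reset(); // newly bound in this change: clears the optional
    }
}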
*/ - public TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple(Pointer p) { super(p); } - public TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value0, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value1, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value2) { allocate(value0, value1, value2); } + public T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T(Pointer p) { super(p); } + public T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value0, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value1, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value2) { allocate(value0, value1, value2); } private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value0, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value1, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value2); - public TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple() { allocate(); } + public T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple put(@ByRef TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple x); + public native @Name("operator =") @ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T put(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T x); public @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get0(@ByRef TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple container); + @Namespace @Name("std::get<0>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get0(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T container); public @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get1(@ByRef TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple container); + @Namespace @Name("std::get<1>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get1(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T container); public @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get2(@ByRef TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple container); + @Namespace @Name("std::get<2>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get2(@ByRef T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwnedTensorMaybeOwnedTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java similarity index 62% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwnedTensorMaybeOwnedTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java index 6c9b04cfcbc..84a61d3a74b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwnedTensorMaybeOwnedTuple.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorMaybeOwnedTensorMaybeOwned_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,19 +18,19 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("std::tuple,c10::MaybeOwned >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorMaybeOwnedTensorMaybeOwnedTuple extends Pointer { +public class T_TensorMaybeOwnedTensorMaybeOwned_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorMaybeOwnedTensorMaybeOwnedTuple(Pointer p) { super(p); } - public TensorMaybeOwnedTensorMaybeOwnedTuple(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value0, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value1) { allocate(value0, value1); } + public T_TensorMaybeOwnedTensorMaybeOwned_T(Pointer p) { super(p); } + public T_TensorMaybeOwnedTensorMaybeOwned_T(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value0, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value1) { allocate(value0, value1); } private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value0, @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned value1); - public TensorMaybeOwnedTensorMaybeOwnedTuple() { allocate(); } + public T_TensorMaybeOwnedTensorMaybeOwned_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef TensorMaybeOwnedTensorMaybeOwnedTuple put(@ByRef TensorMaybeOwnedTensorMaybeOwnedTuple x); + public native @Name("operator =") @ByRef T_TensorMaybeOwnedTensorMaybeOwned_T put(@ByRef T_TensorMaybeOwnedTensorMaybeOwned_T x); public @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get0(@ByRef TensorMaybeOwnedTensorMaybeOwnedTuple container); + @Namespace @Name("std::get<0>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get0(@ByRef T_TensorMaybeOwnedTensorMaybeOwned_T container); public @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get1(@ByRef TensorMaybeOwnedTensorMaybeOwnedTuple container); + @Namespace @Name("std::get<1>") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned get1(@ByRef T_TensorMaybeOwnedTensorMaybeOwned_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorT_TensorTensor_T_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorT_TensorTensor_T_T.java new file mode 100644 index 00000000000..34ace74db37 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorT_TensorTensor_T_T.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import 
org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorT_TensorTensor_T_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public T_TensorT_TensorTensor_T_T(Pointer p) { super(p); } + public T_TensorT_TensorTensor_T_T(@ByRef Tensor value0, @ByRef T_TensorTensor_T value1) { allocate(value0, value1); } + private native void allocate(@ByRef Tensor value0, @ByRef T_TensorTensor_T value1); + public T_TensorT_TensorTensor_T_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_TensorT_TensorTensor_T_T put(@ByRef T_TensorT_TensorTensor_T_T x); + + public @ByRef Tensor get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorT_TensorTensor_T_T container); + public @ByRef T_TensorTensor_T get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @ByRef T_TensorTensor_T get1(@ByRef T_TensorT_TensorTensor_T_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorDoubleLongTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorDoubleLong_T.java similarity index 54% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorDoubleLongTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorDoubleLong_T.java index 05dddfa37fc..f1f23d0ce20 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorDoubleLongTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorDoubleLong_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,24 +17,24 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorTensorDoubleLongTuple extends Pointer { +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorDoubleLong_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public TensorTensorDoubleLongTuple(Pointer p) { super(p); } - public TensorTensorDoubleLongTuple(@ByRef Tensor value0, @ByRef Tensor value1, double value2, @Cast("int64_t") long value3) { allocate(value0, value1, value2, value3); } + public T_TensorTensorDoubleLong_T(Pointer p) { super(p); } + public T_TensorTensorDoubleLong_T(@ByRef Tensor value0, @ByRef Tensor value1, double value2, @Cast("int64_t") long value3) { allocate(value0, value1, value2, value3); } private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, double value2, @Cast("int64_t") long value3); - public TensorTensorDoubleLongTuple() { allocate(); } + public T_TensorTensorDoubleLong_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef TensorTensorDoubleLongTuple put(@ByRef TensorTensorDoubleLongTuple x); + public native @Name("operator =") @ByRef T_TensorTensorDoubleLong_T put(@ByRef T_TensorTensorDoubleLong_T x); public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorDoubleLongTuple container); + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorDoubleLong_T container); public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef TensorTensorDoubleLongTuple container); + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorDoubleLong_T container); public double get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native double get2(@ByRef TensorTensorDoubleLongTuple container); + @Namespace @Name("std::get<2>") public static native double get2(@ByRef T_TensorTensorDoubleLong_T container); public @Cast("int64_t") long get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @Cast("int64_t") long get3(@ByRef TensorTensorDoubleLongTuple container); + @Namespace @Name("std::get<3>") public static native @Cast("int64_t") long get3(@ByRef T_TensorTensorDoubleLong_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorLongLongTensorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorLongLongTensor_T.java similarity index 54% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorLongLongTensorTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorLongLongTensor_T.java index b28d04f0e23..b19c0454932 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorLongLongTensorTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorLongLongTensor_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,26 +17,26 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorTensorLongLongTensorTuple extends Pointer { +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorLongLongTensor_T 
extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorTensorLongLongTensorTuple(Pointer p) { super(p); } - public TensorTensorLongLongTensorTuple(@ByRef Tensor value0, @ByRef Tensor value1, @Cast("int64_t") long value2, @Cast("int64_t") long value3, @ByRef Tensor value4) { allocate(value0, value1, value2, value3, value4); } + public T_TensorTensorLongLongTensor_T(Pointer p) { super(p); } + public T_TensorTensorLongLongTensor_T(@ByRef Tensor value0, @ByRef Tensor value1, @Cast("int64_t") long value2, @Cast("int64_t") long value3, @ByRef Tensor value4) { allocate(value0, value1, value2, value3, value4); } private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @Cast("int64_t") long value2, @Cast("int64_t") long value3, @ByRef Tensor value4); - public TensorTensorLongLongTensorTuple() { allocate(); } + public T_TensorTensorLongLongTensor_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef TensorTensorLongLongTensorTuple put(@ByRef TensorTensorLongLongTensorTuple x); + public native @Name("operator =") @ByRef T_TensorTensorLongLongTensor_T put(@ByRef T_TensorTensorLongLongTensor_T x); public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorLongLongTensorTuple container); + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorLongLongTensor_T container); public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef TensorTensorLongLongTensorTuple container); + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorLongLongTensor_T container); public @Cast("int64_t") long get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @Cast("int64_t") long get2(@ByRef TensorTensorLongLongTensorTuple container); + @Namespace @Name("std::get<2>") public static native @Cast("int64_t") long get2(@ByRef T_TensorTensorLongLongTensor_T container); public @Cast("int64_t") long get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @Cast("int64_t") long get3(@ByRef TensorTensorLongLongTensorTuple container); + @Namespace @Name("std::get<3>") public static native @Cast("int64_t") long get3(@ByRef T_TensorTensorLongLongTensor_T container); public @ByRef Tensor get4() { return get4(this); } - @Namespace @Name("std::get<4>") public static native @ByRef Tensor get4(@ByRef TensorTensorLongLongTensorTuple container); + @Namespace @Name("std::get<4>") public static native @ByRef Tensor get4(@ByRef T_TensorTensorLongLongTensor_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorLongTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorLong_T.java similarity index 53% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorLongTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorLong_T.java index 86b84410ab9..00eb302f91f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorLongTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorLong_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package 
org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,26 +17,26 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorTensorTensorTensorLongTuple extends Pointer { +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorTensorTensorLong_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorTensorTensorTensorLongTuple(Pointer p) { super(p); } - public TensorTensorTensorTensorLongTuple(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @Cast("int64_t") long value4) { allocate(value0, value1, value2, value3, value4); } + public T_TensorTensorTensorTensorLong_T(Pointer p) { super(p); } + public T_TensorTensorTensorTensorLong_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @Cast("int64_t") long value4) { allocate(value0, value1, value2, value3, value4); } private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @Cast("int64_t") long value4); - public TensorTensorTensorTensorLongTuple() { allocate(); } + public T_TensorTensorTensorTensorLong_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef TensorTensorTensorTensorLongTuple put(@ByRef TensorTensorTensorTensorLongTuple x); + public native @Name("operator =") @ByRef T_TensorTensorTensorTensorLong_T put(@ByRef T_TensorTensorTensorTensorLong_T x); public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorTensorTensorLongTuple container); + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensorTensorLong_T container); public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef TensorTensorTensorTensorLongTuple container); + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensorLong_T container); public @ByRef Tensor get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef TensorTensorTensorTensorLongTuple container); + @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensorLong_T container); public @ByRef Tensor get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef TensorTensorTensorTensorLongTuple container); + @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef T_TensorTensorTensorTensorLong_T container); public @Cast("int64_t") long get4() { return get4(this); } - @Namespace @Name("std::get<4>") public static native @Cast("int64_t") long get4(@ByRef TensorTensorTensorTensorLongTuple container); + @Namespace @Name("std::get<4>") public static native @Cast("int64_t") long get4(@ByRef T_TensorTensorTensorTensorLong_T container); } diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTensorTensorTensorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensorTensor_T.java similarity index 50% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTensorTensorTensorTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensorTensor_T.java index 03e04ccdc10..4145fc59fff 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTensorTensorTensorTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensorTensor_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,30 +17,30 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorTensorTensorTensorTensorTensorTensorTuple extends Pointer { +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorTensorTensorTensorTensorTensor_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorTensorTensorTensorTensorTensorTensorTuple(Pointer p) { super(p); } - public TensorTensorTensorTensorTensorTensorTensorTuple(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4, @ByRef Tensor value5, @ByRef Tensor value6) { allocate(value0, value1, value2, value3, value4, value5, value6); } + public T_TensorTensorTensorTensorTensorTensorTensor_T(Pointer p) { super(p); } + public T_TensorTensorTensorTensorTensorTensorTensor_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4, @ByRef Tensor value5, @ByRef Tensor value6) { allocate(value0, value1, value2, value3, value4, value5, value6); } private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4, @ByRef Tensor value5, @ByRef Tensor value6); - public TensorTensorTensorTensorTensorTensorTensorTuple() { allocate(); } + public T_TensorTensorTensorTensorTensorTensorTensor_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef TensorTensorTensorTensorTensorTensorTensorTuple put(@ByRef TensorTensorTensorTensorTensorTensorTensorTuple x); + public native @Name("operator =") @ByRef T_TensorTensorTensorTensorTensorTensorTensor_T put(@ByRef T_TensorTensorTensorTensorTensorTensorTensor_T x); public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef 
TensorTensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef TensorTensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef TensorTensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef T_TensorTensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get4() { return get4(this); } - @Namespace @Name("std::get<4>") public static native @ByRef Tensor get4(@ByRef TensorTensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<4>") public static native @ByRef Tensor get4(@ByRef T_TensorTensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get5() { return get5(this); } - @Namespace @Name("std::get<5>") public static native @ByRef Tensor get5(@ByRef TensorTensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<5>") public static native @ByRef Tensor get5(@ByRef T_TensorTensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get6() { return get6(this); } - @Namespace @Name("std::get<6>") public static native @ByRef Tensor get6(@ByRef TensorTensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<6>") public static native @ByRef Tensor get6(@ByRef T_TensorTensorTensorTensorTensorTensorTensor_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTensorTensorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensor_T.java similarity index 51% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTensorTensorTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensor_T.java index 0dffb525c65..3dbeda5f956 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTensorTensorTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensorTensor_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,28 +17,28 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorTensorTensorTensorTensorTensorTuple extends Pointer { +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorTensorTensorTensorTensor_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public TensorTensorTensorTensorTensorTensorTuple(Pointer p) { super(p); } - public TensorTensorTensorTensorTensorTensorTuple(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4, @ByRef Tensor value5) { allocate(value0, value1, value2, value3, value4, value5); } + public T_TensorTensorTensorTensorTensorTensor_T(Pointer p) { super(p); } + public T_TensorTensorTensorTensorTensorTensor_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4, @ByRef Tensor value5) { allocate(value0, value1, value2, value3, value4, value5); } private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4, @ByRef Tensor value5); - public TensorTensorTensorTensorTensorTensorTuple() { allocate(); } + public T_TensorTensorTensorTensorTensorTensor_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef TensorTensorTensorTensorTensorTensorTuple put(@ByRef TensorTensorTensorTensorTensorTensorTuple x); + public native @Name("operator =") @ByRef T_TensorTensorTensorTensorTensorTensor_T put(@ByRef T_TensorTensorTensorTensorTensorTensor_T x); public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef TensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef TensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef TensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get4() { return get4(this); } - @Namespace @Name("std::get<4>") public static native @ByRef Tensor get4(@ByRef TensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<4>") public static native @ByRef Tensor get4(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); public @ByRef Tensor get5() { return get5(this); } - @Namespace @Name("std::get<5>") public static native @ByRef Tensor get5(@ByRef TensorTensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<5>") public static native @ByRef Tensor get5(@ByRef T_TensorTensorTensorTensorTensorTensor_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTensorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensor_T.java similarity index 52% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTensorTuple.java rename to 
pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensor_T.java index 9628d68bf6a..7bdb1ba99e2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTensorTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorTensor_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,26 +17,26 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorTensorTensorTensorTensorTuple extends Pointer { +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorTensorTensorTensor_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorTensorTensorTensorTensorTuple(Pointer p) { super(p); } - public TensorTensorTensorTensorTensorTuple(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4) { allocate(value0, value1, value2, value3, value4); } + public T_TensorTensorTensorTensorTensor_T(Pointer p) { super(p); } + public T_TensorTensorTensorTensorTensor_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4) { allocate(value0, value1, value2, value3, value4); } private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @ByRef Tensor value4); - public TensorTensorTensorTensorTensorTuple() { allocate(); } + public T_TensorTensorTensorTensorTensor_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef TensorTensorTensorTensorTensorTuple put(@ByRef TensorTensorTensorTensorTensorTuple x); + public native @Name("operator =") @ByRef T_TensorTensorTensorTensorTensor_T put(@ByRef T_TensorTensorTensorTensorTensor_T x); public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensorTensorTensor_T container); public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef TensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensorTensor_T container); public @ByRef Tensor get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef TensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensorTensor_T container); public @ByRef Tensor get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef TensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<3>") public static native @ByRef Tensor 
get3(@ByRef T_TensorTensorTensorTensorTensor_T container); public @ByRef Tensor get4() { return get4(this); } - @Namespace @Name("std::get<4>") public static native @ByRef Tensor get4(@ByRef TensorTensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<4>") public static native @ByRef Tensor get4(@ByRef T_TensorTensorTensorTensorTensor_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java new file mode 100644 index 00000000000..859bd478011 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorVector_T.java @@ -0,0 +1,40 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorTensorTensorVector_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public T_TensorTensorTensorTensorVector_T(Pointer p) { super(p); } + public T_TensorTensorTensorTensorVector_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @Cast({"", "std::vector"}) @StdMove TensorVector value3) { allocate(value0, value1, value2, value3); } + private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @Cast({"", "std::vector"}) @StdMove TensorVector value3); + public T_TensorTensorTensorTensorVector_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_TensorTensorTensorTensorVector_T put(@ByRef T_TensorTensorTensorTensorVector_T x); + + public @ByRef Tensor get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensorTensorVector_T container); + public @ByRef Tensor get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensorVector_T container); + public @ByRef Tensor get2() { return get2(this); } + @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensorVector_T container); + public @Cast({"", "std::vector"}) @StdMove TensorVector get3() { return get3(this); } + @Namespace @Name("std::get<3>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get3(@ByRef T_TensorTensorTensorTensorVector_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensor_T.java similarity index 54% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensor_T.java index a68d2844f1b..f0b5d11a3a3 100644 --- 
a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensor_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,24 +17,24 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorTensorTensorTensorTuple extends Pointer { +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorTensorTensor_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorTensorTensorTensorTuple(Pointer p) { super(p); } - public TensorTensorTensorTensorTuple(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3) { allocate(value0, value1, value2, value3); } + public T_TensorTensorTensorTensor_T(Pointer p) { super(p); } + public T_TensorTensorTensorTensor_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3) { allocate(value0, value1, value2, value3); } private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3); - public TensorTensorTensorTensorTuple() { allocate(); } + public T_TensorTensorTensorTensor_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef TensorTensorTensorTensorTuple put(@ByRef TensorTensorTensorTensorTuple x); + public native @Name("operator =") @ByRef T_TensorTensorTensorTensor_T put(@ByRef T_TensorTensorTensorTensor_T x); public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensorTensor_T container); public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef TensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensor_T container); public @ByRef Tensor get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef TensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensor_T container); public @ByRef Tensor get3() { return get3(this); } - @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef TensorTensorTensorTensorTuple container); + @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef T_TensorTensorTensorTensor_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorsLongLongLongLongTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorsLongLongLongLongTensor_T.java new file mode 100644 index 00000000000..33f2bc4cb37 --- /dev/null 
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensorTensorsLongLongLongLongTensor_T.java @@ -0,0 +1,50 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorTensorTensorsLongLongLongLongTensor_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public T_TensorTensorTensorTensorsLongLongLongLongTensor_T(Pointer p) { super(p); } + public T_TensorTensorTensorTensorsLongLongLongLongTensor_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @Cast("int64_t") long value4, @Cast("int64_t") long value5, @Cast("int64_t") long value6, @Cast("int64_t") long value7, @ByRef Tensor value8) { allocate(value0, value1, value2, value3, value4, value5, value6, value7, value8); } + private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @ByRef Tensor value3, @Cast("int64_t") long value4, @Cast("int64_t") long value5, @Cast("int64_t") long value6, @Cast("int64_t") long value7, @ByRef Tensor value8); + public T_TensorTensorTensorTensorsLongLongLongLongTensor_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T put(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T x); + + public @ByRef Tensor get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); + public @ByRef Tensor get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); + public @ByRef Tensor get2() { return get2(this); } + @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); + public @ByRef Tensor get3() { return get3(this); } + @Namespace @Name("std::get<3>") public static native @ByRef Tensor get3(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); + public @Cast("int64_t") long get4() { return get4(this); } + @Namespace @Name("std::get<4>") public static native @Cast("int64_t") long get4(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); + public @Cast("int64_t") long get5() { return get5(this); } + @Namespace @Name("std::get<5>") public static native @Cast("int64_t") long get5(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); + public @Cast("int64_t") long get6() { return get6(this); } + @Namespace @Name("std::get<6>") public static native @Cast("int64_t") long get6(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); + public @Cast("int64_t") long get7() { return get7(this); } + @Namespace 
@Name("std::get<7>") public static native @Cast("int64_t") long get7(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); + public @ByRef Tensor get8() { return get8(this); } + @Namespace @Name("std::get<8>") public static native @ByRef Tensor get8(@ByRef T_TensorTensorTensorTensorsLongLongLongLongTensor_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensor_T.java similarity index 55% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensor_T.java index 17b63884f24..cf57286fd65 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorTensor_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,22 +17,22 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorTensorTensorTuple extends Pointer { +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorTensor_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public TensorTensorTensorTuple(Pointer p) { super(p); } - public TensorTensorTensorTuple(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2) { allocate(value0, value1, value2); } + public T_TensorTensorTensor_T(Pointer p) { super(p); } + public T_TensorTensorTensor_T(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2) { allocate(value0, value1, value2); } private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2); - public TensorTensorTensorTuple() { allocate(); } + public T_TensorTensorTensor_T() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef TensorTensorTensorTuple put(@ByRef TensorTensorTensorTuple x); + public native @Name("operator =") @ByRef T_TensorTensorTensor_T put(@ByRef T_TensorTensorTensor_T x); public @ByRef Tensor get0() { return get0(this); } - @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorTensorTuple container); + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorTensor_T container); public @ByRef Tensor get1() { return get1(this); } - @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef TensorTensorTensorTuple container); + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensorTensor_T container); public @ByRef Tensor get2() { return get2(this); } - @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef TensorTensorTensorTuple container); + @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef T_TensorTensorTensor_T container); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java new file mode 100644 index 00000000000..d79783f92f9 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVectorTensorVector_T.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple,std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorVectorTensorVector_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public T_TensorTensorVectorTensorVector_T(Pointer p) { super(p); } + public T_TensorTensorVectorTensorVector_T(@ByRef Tensor value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1, @Cast({"", "std::vector"}) @StdMove TensorVector value2) { allocate(value0, value1, value2); } + private native void allocate(@ByRef Tensor value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1, @Cast({"", "std::vector"}) @StdMove TensorVector value2); + public T_TensorTensorVectorTensorVector_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_TensorTensorVectorTensorVector_T put(@ByRef T_TensorTensorVectorTensorVector_T x); + + public @ByRef Tensor get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorVectorTensorVector_T container); + public @Cast({"", "std::vector"}) @StdMove TensorVector get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get1(@ByRef T_TensorTensorVectorTensorVector_T container); + public @Cast({"", "std::vector"}) @StdMove TensorVector get2() { return get2(this); } + @Namespace @Name("std::get<2>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get2(@ByRef T_TensorTensorVectorTensorVector_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java new file mode 100644 index 00000000000..5331fd742b7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensorVector_T.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensorVector_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
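// Editorial sketch, not part of the generated patch: tuple members typed
// std::vector<torch::Tensor> surface as TensorVector; this assumes TensorVector
// exposes the usual JavaCPP vector accessors size() and get(long).
T_TensorTensorVector_T pair = new T_TensorTensorVector_T();
Tensor head = pair.get0();
TensorVector rest = pair.get1();
for (long i = 0; i < rest.size(); i++) {
    Tensor t = rest.get(i);  // element of the std::vector<torch::Tensor> member
}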
*/ + public T_TensorTensorVector_T(Pointer p) { super(p); } + public T_TensorTensorVector_T(@ByRef Tensor value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1) { allocate(value0, value1); } + private native void allocate(@ByRef Tensor value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1); + public T_TensorTensorVector_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_TensorTensorVector_T put(@ByRef T_TensorTensorVector_T x); + + public @ByRef Tensor get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensorVector_T container); + public @Cast({"", "std::vector"}) @StdMove TensorVector get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get1(@ByRef T_TensorTensorVector_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_T.java similarity index 56% rename from pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTuple.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_T.java index e957d43f1a4..72317cae60f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_T.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,20 +17,20 @@ import static org.bytedeco.pytorch.global.torch.*; -@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorTensorTuple extends Pointer { +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorTensor_T extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/
-    public TensorTensorTuple(Pointer p) { super(p); }
-    public TensorTensorTuple(@ByRef Tensor value0, @ByRef Tensor value1) { allocate(value0, value1); }
+    public T_TensorTensor_T(Pointer p) { super(p); }
+    public T_TensorTensor_T(@ByRef Tensor value0, @ByRef Tensor value1) { allocate(value0, value1); }
     private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1);
-    public TensorTensorTuple() { allocate(); }
+    public T_TensorTensor_T() { allocate(); }
     private native void allocate();
-    public native @Name("operator =") @ByRef TensorTensorTuple put(@ByRef TensorTensorTuple x);
+    public native @Name("operator =") @ByRef T_TensorTensor_T put(@ByRef T_TensorTensor_T x);
 
     public @ByRef Tensor get0() { return get0(this); }
-    @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorTuple container);
+    @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef T_TensorTensor_T container);
     public @ByRef Tensor get1() { return get1(this); }
-    @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef TensorTensorTuple container);
+    @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorTensor_T container);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_TOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_TOptional.java
new file mode 100644
index 00000000000..40a8fd6b85e
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorTensor_TOptional.java
@@ -0,0 +1,35 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@NoOffset @Name("torch::optional<std::tuple<torch::Tensor,torch::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class T_TensorTensor_TOptional extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
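// Editorial sketch, not part of the generated patch: the Optional wrapper mirrors
// torch::optional. A default-constructed instance is empty; put(...) stores a
// tuple and get() reads it back (tensors here are placeholder undefined tensors).
T_TensorTensor_TOptional opt = new T_TensorTensor_TOptional();
if (!opt.has_value()) {
    opt.put(new T_TensorTensor_T(new Tensor(), new Tensor()));
}
T_TensorTensor_T pair = opt.get();
opt.reset();  // empties the optional again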
*/ + public T_TensorTensor_TOptional(Pointer p) { super(p); } + public T_TensorTensor_TOptional(T_TensorTensor_T value) { this(); put(value); } + public T_TensorTensor_TOptional() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_TensorTensor_TOptional put(@ByRef T_TensorTensor_TOptional x); + + public native boolean has_value(); + public native void reset(); + public native @Name("value") @ByRef T_TensorTensor_T get(); + @ValueSetter public native T_TensorTensor_TOptional put(@ByRef T_TensorTensor_T value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T.java new file mode 100644 index 00000000000..53779948f33 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple,std::vector,std::vector,std::vector,std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T(Pointer p) { super(p); } + public T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T(@Cast({"", "std::vector"}) @StdMove TensorVector value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1, @Cast({"", "std::vector"}) @StdMove TensorVector value2, @Cast({"", "std::vector"}) @StdMove TensorVector value3, @Cast({"", "std::vector"}) @StdMove TensorVector value4) { allocate(value0, value1, value2, value3, value4); } + private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector value0, @Cast({"", "std::vector"}) @StdMove TensorVector value1, @Cast({"", "std::vector"}) @StdMove TensorVector value2, @Cast({"", "std::vector"}) @StdMove TensorVector value3, @Cast({"", "std::vector"}) @StdMove TensorVector value4); + public T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T put(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T x); + + public @Cast({"", "std::vector"}) @StdMove TensorVector get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get0(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T container); + public @Cast({"", "std::vector"}) @StdMove TensorVector get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get1(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T container); + public @Cast({"", "std::vector"}) @StdMove TensorVector get2() { return get2(this); } + @Namespace @Name("std::get<2>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get2(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T container); + public @Cast({"", "std::vector"}) @StdMove TensorVector get3() { return get3(this); } + @Namespace @Name("std::get<3>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get3(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T container); + public @Cast({"", "std::vector"}) @StdMove TensorVector get4() { return get4(this); } + @Namespace @Name("std::get<4>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get4(@ByRef T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensor_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensor_T.java new file mode 100644 index 00000000000..8aec7f95641 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TensorVectorTensor_T.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset 
@Name("std::tuple,torch::Tensor>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TensorVectorTensor_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public T_TensorVectorTensor_T(Pointer p) { super(p); } + public T_TensorVectorTensor_T(@Cast({"", "std::vector"}) @StdMove TensorVector value0, @ByRef Tensor value1) { allocate(value0, value1); } + private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector value0, @ByRef Tensor value1); + public T_TensorVectorTensor_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_TensorVectorTensor_T put(@ByRef T_TensorVectorTensor_T x); + + public @Cast({"", "std::vector"}) @StdMove TensorVector get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @Cast({"", "std::vector"}) @StdMove TensorVector get0(@ByRef T_TensorVectorTensor_T container); + public @ByRef Tensor get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef T_TensorVectorTensor_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_T.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_T.java new file mode 100644 index 00000000000..8bbee07231d --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_T.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::tuple") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TypePtrLong_T extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public T_TypePtrLong_T(Pointer p) { super(p); } + public T_TypePtrLong_T(@ByRef Type.TypePtr value0, int value1) { allocate(value0, value1); } + private native void allocate(@ByRef Type.TypePtr value0, int value1); + public T_TypePtrLong_T() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef T_TypePtrLong_T put(@ByRef T_TypePtrLong_T x); + + public @ByRef Type.TypePtr get0() { return get0(this); } + @Namespace @Name("std::get<0>") public static native @ByRef Type.TypePtr get0(@ByRef T_TypePtrLong_T container); + public int get1() { return get1(this); } + @Namespace @Name("std::get<1>") public static native int get1(@ByRef T_TypePtrLong_T container); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_TOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_TOptional.java new file mode 100644 index 00000000000..ed372769a43 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/T_TypePtrLong_TOptional.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("c10::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class T_TypePtrLong_TOptional extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/
+    public T_TypePtrLong_TOptional(Pointer p) { super(p); }
+    public T_TypePtrLong_TOptional(T_TypePtrLong_T value) { this(); put(value); }
+    public T_TypePtrLong_TOptional() { allocate(); }
+    private native void allocate();
+    public native @Name("operator =") @ByRef T_TypePtrLong_TOptional put(@ByRef T_TypePtrLong_TOptional x);
+
+    public native boolean has_value();
+    public native void reset();
+    public native @Name("value") @ByRef T_TypePtrLong_T get();
+    @ValueSetter public native T_TypePtrLong_TOptional put(@ByRef T_TypePtrLong_T value);
+}
+
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TagArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TagArrayRef.java
new file mode 100644
index 00000000000..bfe33c1ad24
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TagArrayRef.java
@@ -0,0 +1,133 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("c10::ArrayRef<at::Tag>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TagArrayRef extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public TagArrayRef(Pointer p) { super(p); }
+    /** Native array allocator. Access with {@link Pointer#position(long)}. */
+    public TagArrayRef(long size) { super((Pointer)null); allocateArray(size); }
+    private native void allocateArray(long size);
+    @Override public TagArrayRef position(long position) {
+        return (TagArrayRef)super.position(position);
+    }
+    @Override public TagArrayRef getPointer(long i) {
+        return new TagArrayRef((Pointer)this).offsetAddress(i);
+    }
+
+    /** \name Constructors
+     *  \{
+

+     *  Construct an empty ArrayRef. */
+    /* implicit */ public TagArrayRef() { super((Pointer)null); allocate(); }
+private native void allocate();
+
+    /** Construct an ArrayRef from a single element. */
+    // TODO Make this explicit
+
+
+    /** Construct an ArrayRef from a pointer and length. */
+    public TagArrayRef(IntPointer data, long length) { super((Pointer)null); allocate(data, length); }
+    private native void allocate(@Cast("const at::Tag*") IntPointer data, @Cast("size_t") long length);
+
+    /** Construct an ArrayRef from a range. */
+    public TagArrayRef(IntPointer begin, IntPointer end) { super((Pointer)null); allocate(begin, end); }
+    private native void allocate(@Cast("const at::Tag*") IntPointer begin, @Cast("const at::Tag*") IntPointer end);
+
+    /** Construct an ArrayRef from a SmallVector. This is templated in order to
+     *  avoid instantiating SmallVectorTemplateCommon<T> whenever we
+     *  copy-construct an ArrayRef. */
+
+    /** Construct an ArrayRef from a std::vector. */
+    // The enable_if stuff here makes sure that this isn't used for
+    // std::vector<bool>, because ArrayRef can't work on a std::vector<bool>
+    // bitfield.
+
+    /** Construct an ArrayRef from a std::array */
+
+    /** Construct an ArrayRef from a C array. */
+
+    /** Construct an ArrayRef from a std::initializer_list. */
+    /* implicit */
+
+    /** \}
+     *  \name Simple Operations
+     *  \{ */
+
+    public native @Const IntPointer begin();
+    public native @Const IntPointer end();
+
+    // These are actually the same as iterator, since ArrayRef only
+    // gives you const iterators.
+    public native @Const IntPointer cbegin();
+    public native @Const IntPointer cend();
+
+    /** empty - Check if the array is empty. */
+    public native @Cast("const bool") boolean empty();
+
+    public native @Const IntPointer data();
+
+    /** size - Get the array size. */
+    public native @Cast("const size_t") long size();
+
+    /** front - Get the first element. */
+    public native @Const @ByRef Tag front();
+
+    /** back - Get the last element. */
+    public native @Const @ByRef Tag back();
+
+    /** equals - Check for element-wise equality. */
+    public native @Cast("const bool") boolean equals(@ByVal TagArrayRef RHS);
+
+    /** slice(n, m) - Take M elements of the array starting at element N */
+    public native @Const @ByVal TagArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M);
+
+    /** slice(n) - Chop off the first N elements of the array. */
+    public native @Const @ByVal TagArrayRef slice(@Cast("size_t") long N);
+
+    /** \}
+     *  \name Operator Overloads
+     *  \{ */
+    public native @Const @ByRef @Name("operator []") Tag get(@Cast("size_t") long Index);
+
+    /** Vector compatibility */
+
+    ///
+    public native @Const @ByRef Tag at(@Cast("size_t") long Index);
+
+    /** Disallow accidental assignment from a temporary.
+     *
+     *  The declaration here is extra complicated so that "arrayRef = {}"
+     *  continues to select the move assignment operator. */
+
+
+    /** Disallow accidental assignment from a temporary.
+     *
+     *  The declaration here is extra complicated so that "arrayRef = {}"
+     *  continues to select the move assignment operator. */
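// Editorial sketch, not part of the generated patch: c10::ArrayRef is a
// non-owning view, so the backing buffer must outlive the TagArrayRef. at::Tag
// values travel as ints, matching the @Cast("const at::Tag*") constructors above.
IntPointer storage = new IntPointer(2);         // caller-owned buffer for two tags
TagArrayRef tags = new TagArrayRef(storage, 2);
long count = tags.size();                       // 2
boolean none = tags.empty();                    // false while storage is attached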
*/ + + + /** \} + * \name Expensive Operations + * \{ */ + + + /** \} */ +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TaggedRange.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TaggedRange.java deleted file mode 100644 index 2b25085a26b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TaggedRange.java +++ /dev/null @@ -1,31 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// A pair of (byte offset, SourceRange) describing a specific segment -// of the output stream -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TaggedRange extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TaggedRange(Pointer p) { super(p); } - - public TaggedRange(@Cast("size_t") long bytes, @ByVal SourceRange range) { super((Pointer)null); allocate(bytes, range); } - private native void allocate(@Cast("size_t") long bytes, @ByVal SourceRange range); - public native @Cast("size_t") long bytes(); public native TaggedRange bytes(long setter); - public native @ByRef SourceRange range(); public native TaggedRange range(SourceRange setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tanh.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tanh.java deleted file mode 100644 index 1957f673ee3..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tanh.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code TanhImpl}. - * See the documentation for {@code TanhImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Tanh extends TanhImplModuleHolder { - static { Loader.load(); } - - public Tanh(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Tanh(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
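The TagArrayRef added above follows the usual JavaCPP ArrayRef mapping: a non-owning view over at::Tag storage (surfaced as IntPointer), so the backing memory must outlive the view. A hedged usage sketch, assuming meaningful Tag values have already been written into the buffer:

    IntPointer storage = new IntPointer(2);          // backing memory for two at::Tag values
    TagArrayRef tags = new TagArrayRef(storage, 2);  // pointer + length constructor; no copy
    for (long i = 0; i < tags.size(); i++) {
        Tag tag = tags.get(i);                       // bound to operator[]
    }
    TagArrayRef first = tags.slice(0, 1);            // slice(n, m): m elements starting at n
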
*/ - public Tanh(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java index e097ba5fdbe..00690e3c568 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java index 29935ecb6ce..b626e48b46b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class TanhImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TanhImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TanhImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(TanhImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplModuleHolder.java deleted file mode 100644 index 5fac893f0e5..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TanhImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TanhImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public TanhImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public TanhImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") TanhImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") TanhImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) TanhImpl ptr(); - - /** Returns a pointer to the underlying module. 
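The clone() rebinding just above (now @SharedPtr("torch::nn::Module") @ByVal rather than a bare @SharedPtr Module) is applied mechanically to every XxxImplCloneable in this patch. From the Java side the call shape is unchanged; a small sketch assuming the default-constructible TanhImpl:

    TanhImpl tanh = new TanhImpl();
    Module copy = tanh.clone();   // recursive deep copy: parameters, buffers and
                                  // submodules of the copy are distinct objects
    // clone(DeviceOptional) additionally places the copy on the given device.
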
*/ - public native TanhImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tanhshrink.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tanhshrink.java deleted file mode 100644 index 6565ec57f28..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tanhshrink.java +++ /dev/null @@ -1,33 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code TanhshrinkImpl}. - * See the documentation for {@code TanhshrinkImpl} class to learn what methods it - * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Tanhshrink extends TanhshrinkImplModuleHolder { - static { Loader.load(); } - - public Tanhshrink(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Tanhshrink(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhshrinkImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhshrinkImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
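With TanhImplModuleHolder deleted above (its Tanh front-end was removed a few hunks earlier), the holder indirection such as access()/get() disappears and the *Impl class is used directly, which is the pattern this refactor applies across the torch::nn modules. A hedged before/after sketch, assuming a static import of org.bytedeco.pytorch.global.torch.*:

    // 1.5.9 (holder-based) code went through the holder, e.g.
    //   Tensor y = new Tanh(new TanhImpl()).get().forward(x);
    // With this patch, forward() is called on the Impl class itself:
    Tensor x = randn(4);               // factory from the global torch class
    TanhImpl tanh = new TanhImpl();
    Tensor y = tanh.forward(x);
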
*/ - public Tanhshrink(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java index 77800afb99d..d9b3ecb991a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java index 57eed6eafb4..b2e5fa0e9b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class TanhshrinkImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TanhshrinkImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TanhshrinkImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(TanhshrinkImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplModuleHolder.java deleted file mode 100644 index 4780a8fd6d6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TanhshrinkImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TanhshrinkImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public TanhshrinkImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public TanhshrinkImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhshrinkImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TanhshrinkImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") TanhshrinkImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") TanhshrinkImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) TanhshrinkImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native TanhshrinkImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index 4e2b7c51de4..0ebf5a50d82 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -49,19 +51,25 @@ public class Tensor extends TensorBase { } public Tensor() { super((Pointer)null); allocate(); } - private native void allocate(); + @SharedPtr private native void allocate(); // This constructor should not be used by end users and is an implementation // detail invoked by autogenerated code. + public Tensor( + @ByVal TensorImplPtr tensor_impl) { super((Pointer)null); allocate(tensor_impl); } + @SharedPtr private native void allocate( + @ByVal TensorImplPtr tensor_impl); public Tensor(@Const @ByRef Tensor tensor) { super((Pointer)null); allocate(tensor); } - private native void allocate(@Const @ByRef Tensor tensor); + @SharedPtr private native void allocate(@Const @ByRef Tensor tensor); // Implicitly move-constructible from TensorBase, but must be explicit to increase refcount public Tensor(@Const @ByRef TensorBase base) { super((Pointer)null); allocate(base); } - private native void allocate(@Const @ByRef TensorBase base); + @SharedPtr private native void allocate(@Const @ByRef TensorBase base); /*implicit*/ // Creates a new wrapper from TensorImpl. Intentionally a free method because // it should be used with care. 
Checks necessary invariants + public static native @ByVal Tensor wrap_tensor_impl( + @ByVal TensorImplPtr tensor_impl); public native @ByVal Tensor contiguous(MemoryFormat memory_format/*=c10::MemoryFormat::Contiguous*/); public native @ByVal Tensor contiguous(); @@ -130,7 +138,7 @@ public class Tensor extends TensorBase { - public native @Deprecated @ByRef DeprecatedTypeProperties type(); + public native @ByVal Tensor toType(ScalarType t); @@ -138,9 +146,9 @@ public class Tensor extends TensorBase { public native @ByVal Tensor toBackend(Backend b); public native @ByVal Tensor toBackend(@Cast("c10::Backend") int b); - public native @Cast("bool") @Deprecated @NoException(true) boolean is_variable(); + - public native @Name("item") byte item_byte(); + public native @Name("item") byte item_char(); public native @Name("item") short item_short(); @@ -151,8 +159,6 @@ public class Tensor extends TensorBase { public native @Name("item") float item_float(); public native @Name("item") double item_double(); - - public native @ByVal @Name("operator ~") Tensor not(); @@ -285,7 +291,7 @@ public class Tensor extends TensorBase { /// /// /// - public native void backward(@Const @ByRef(nullValue = "at::Tensor{}") Tensor gradient, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") TensorListOptional inputs); + public native void backward(@Const @ByRef(nullValue = "at::Tensor{}") Tensor gradient, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") TensorArrayRefOptional inputs); public native void backward(); /** \fn Tensor detach() const; @@ -326,13 +332,13 @@ public class Tensor extends TensorBase { // users who should use the API provided in torch/csrc/autograd.h /** This function returns the forward gradient for this Tensor at the given level. */ - public native @Const @ByRef Tensor _fw_grad(@Cast("uint64_t") long level); + /** This function can be used to set the value of the forward grad. * Note that the given new_grad might not be used directly if it has different * metadata (size/stride/storage offset) compared to this Tensor. In that case, * new_grad content will be copied into a new Tensor */ - public native void _set_fw_grad(@Const @ByRef TensorBase new_grad, @Cast("uint64_t") long level, @Cast("bool") boolean is_inplace_op); + // STOP. 
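Several of the Tensor hunks above are user-visible rather than cosmetic: the deprecated type() and is_variable() accessors are dropped, toType(ScalarType) remains as the conversion path, item_byte is renamed item_char to match the c10 scalar-type naming, and the new TensorImplPtr constructor plus wrap_tensor_impl() are binding internals rather than end-user API. A short sketch of the surviving surface, assuming a static import of org.bytedeco.pytorch.global.torch.*:

    Tensor t = ones(3);                       // factory from the global torch class
    Tensor d = t.toType(ScalarType.Double);   // replaces going through the removed type()
    double v = d.item_double();               // item_byte() is now item_char()
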
Thinking of adding a method here, which only makes use @@ -340,8 +346,8 @@ public class Tensor extends TensorBase { //example //Tensor * add(Tensor & b); - public native void __dispatch__backward(@ByVal TensorArrayRef inputs, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional gradient, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/); - public native void __dispatch__backward(@ByVal TensorArrayRef inputs); + public native void __dispatch__backward(@ByVal @Cast("at::TensorList*") TensorArrayRef inputs, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional gradient, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/); + public native void __dispatch__backward(@ByVal @Cast("at::TensorList*") TensorArrayRef inputs); public native void __dispatch_set_data(@Const @ByRef Tensor new_data); public native @ByVal Tensor __dispatch_data(); public native @Cast("bool") boolean __dispatch_is_leaf(); @@ -423,18 +429,18 @@ public class Tensor extends TensorBase { public native @ByRef Tensor atanh_(); public native @ByVal Tensor arctanh(); public native @ByRef Tensor arctanh_(); - public native @ByVal Tensor as_strided(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); - public native @ByVal Tensor as_strided(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); + public native @ByVal Tensor as_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); + public native @ByVal Tensor as_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); public native @ByVal Tensor as_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); public native @ByVal Tensor as_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); - public native @ByVal Tensor as_strided_symint(@ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); - public native @ByVal Tensor as_strided_symint(@ByVal SymIntRef size, @ByVal SymIntRef stride); - public native @Const @ByRef Tensor as_strided_(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); - public native @Const @ByRef Tensor as_strided_(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); + public native @ByVal Tensor as_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); + public native @ByVal Tensor as_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); + public native @Const @ByRef Tensor as_strided_(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); + public native @Const @ByRef Tensor as_strided_(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); public native @Const @ByRef Tensor as_strided_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); public native @Const @ByRef Tensor as_strided_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); - public native @Const @ByRef Tensor as_strided__symint(@ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); - public native @Const @ByRef Tensor as_strided__symint(@ByVal SymIntRef size, @ByVal SymIntRef stride); + public native @Const @ByRef Tensor as_strided__symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); + public native @Const @ByRef Tensor as_strided__symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); public native @ByVal Tensor asin(); public native @ByRef Tensor asin_(); public native @ByVal Tensor arcsin(); @@ -472,27 +478,27 @@ public class Tensor extends TensorBase { public native @ByVal Tensor logical_or(@Const @ByRef Tensor other); public native @ByRef Tensor logical_or_(@Const @ByRef Tensor other); public native @ByVal Tensor bmm(@Const @ByRef Tensor mat2); - public native @ByVal Tensor broadcast_to(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); + public native @ByVal Tensor broadcast_to(@ByVal LongArrayRef size); public native @ByVal Tensor broadcast_to(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); - public native @ByVal Tensor broadcast_to_symint(@ByVal SymIntRef size); + public native @ByVal Tensor broadcast_to_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor ceil(); public native @ByRef Tensor ceil_(); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_chunk(@Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_chunk(@Cast("int64_t") long chunks); - public native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Cast("int64_t") long chunks); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Cast("int64_t") long sections, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Cast("int64_t") long sections); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymInt sections, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymInt sections); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast("c10::ArrayRef*") LongArrayRef indices, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast("c10::ArrayRef*") LongArrayRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] indices, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymIntRef indices, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymIntRef indices); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor tensor_indices_or_sections, @Cast("int64_t") long dim/*=0*/); - public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor tensor_indices_or_sections); + public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_chunk(@Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_chunk(@Cast("int64_t") long chunks); + public native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Cast("int64_t") long chunks); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Cast("int64_t") long sections, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Cast("int64_t") long sections); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymInt sections, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymInt sections); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal LongArrayRef indices, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal LongArrayRef indices); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] indices, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
indices); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymIntArrayRef indices, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@ByVal SymIntArrayRef indices); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor tensor_indices_or_sections, @Cast("int64_t") long dim/*=0*/); + public native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor tensor_indices_or_sections); public native @ByVal Tensor clamp(@Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); public native @ByVal Tensor clamp(@Const @ByRef ScalarOptional min); public native @ByVal Tensor clamp(@Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); @@ -525,17 +531,17 @@ public class Tensor extends TensorBase { public native @ByRef Tensor cos_(); public native @ByVal Tensor cosh(); public native @ByRef Tensor cosh_(); - public native @ByVal Tensor count_nonzero(@ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); + public native @ByVal Tensor count_nonzero(@ByVal LongArrayRef dim); public native @ByVal Tensor count_nonzero(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); public native @ByVal Tensor count_nonzero(@ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); public native @ByVal Tensor count_nonzero(); public native @ByVal Tensor cov(@Cast("int64_t") long correction/*=1*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional fweights, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional aweights); public native @ByVal Tensor cov(); public native @ByVal Tensor corrcoef(); - public native @ByVal TensorTensorTuple cummax(@Cast("int64_t") long dim); - public native @ByVal TensorTensorTuple cummax(@ByVal Dimname dim); - public native @ByVal TensorTensorTuple cummin(@Cast("int64_t") long dim); - public native @ByVal TensorTensorTuple cummin(@ByVal Dimname dim); + public native @ByVal T_TensorTensor_T cummax(@Cast("int64_t") long dim); + public native @ByVal T_TensorTensor_T cummax(@ByVal Dimname dim); + public native @ByVal T_TensorTensor_T cummin(@Cast("int64_t") long dim); + public native @ByVal T_TensorTensor_T cummin(@ByVal Dimname dim); public native @ByVal Tensor cumprod(@Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); public native @ByVal Tensor cumprod(@Cast("int64_t") long dim); public native @ByRef Tensor cumprod_(@Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @@ -586,57 +592,57 @@ public class Tensor extends TensorBase { public native @ByRef Tensor true_divide_(@Const @ByRef Scalar other); public native @ByVal Tensor dot(@Const @ByRef Tensor tensor); public native @ByVal Tensor vdot(@Const @ByRef Tensor other); - public native @ByVal Tensor new_empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); + public native @ByVal Tensor new_empty(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_empty(@ByVal LongArrayRef size); public native @ByVal Tensor new_empty(@ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - public native @ByVal Tensor new_empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_empty(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_empty_symint(@ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_empty_symint(@ByVal SymIntRef size); - public native @ByVal Tensor new_empty_symint(@ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_empty_strided(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_empty_strided(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); + public native @ByVal Tensor new_empty_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_empty_symint(@ByVal SymIntArrayRef size); + public native @ByVal Tensor new_empty_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); public native @ByVal Tensor new_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); - public native @ByVal Tensor new_empty_strided(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_empty_strided_symint(@ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_empty_strided_symint(@ByVal SymIntRef size, @ByVal SymIntRef stride); - public native @ByVal Tensor new_empty_strided_symint(@ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value); + public native @ByVal Tensor new_empty_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_empty_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); + public native @ByVal Tensor new_empty_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value); public native @ByVal Tensor new_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); - public native @ByVal Tensor new_full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional 
device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_full_symint(@ByVal SymIntRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_full_symint(@ByVal SymIntRef size, @Const @ByRef Scalar fill_value); - public native @ByVal Tensor new_full_symint(@ByVal SymIntRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); + public native @ByVal Tensor new_full_symint(@ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_full_symint(@ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value); + public native @ByVal Tensor new_full_symint(@ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_zeros(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_zeros(@ByVal LongArrayRef size); public native @ByVal Tensor new_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); - public native @ByVal Tensor new_zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_zeros(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_zeros_symint(@ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_zeros_symint(@ByVal SymIntRef size); - public native @ByVal Tensor new_zeros_symint(@ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); + public native @ByVal Tensor new_zeros_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_zeros_symint(@ByVal SymIntArrayRef size); + public native @ByVal Tensor new_zeros_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_ones(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_ones(@ByVal LongArrayRef size); public native @ByVal Tensor new_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); public native @ByVal Tensor new_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); - public native @ByVal Tensor new_ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @ByVal Tensor new_ones(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); public native @ByVal Tensor new_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @ByVal Tensor new_ones_symint(@ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); - public native @ByVal Tensor new_ones_symint(@ByVal SymIntRef size); - public native @ByVal Tensor new_ones_symint(@ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - public native @Const @ByRef Tensor resize_(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); - public native @Const @ByRef Tensor resize_(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); + public native @ByVal Tensor new_ones_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); + public native @ByVal Tensor new_ones_symint(@ByVal SymIntArrayRef size); + public native @ByVal Tensor new_ones_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + public native @Const @ByRef Tensor resize_(@ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); + public native @Const @ByRef Tensor resize_(@ByVal LongArrayRef size); public native @Const @ByRef Tensor resize_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); public native @Const @ByRef Tensor resize_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); - public native @Const @ByRef Tensor resize__symint(@ByVal SymIntRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); - public native @Const @ByRef Tensor resize__symint(@ByVal SymIntRef size); + public native @Const @ByRef Tensor resize__symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); + public native @Const @ByRef Tensor resize__symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor erf(); public native @ByRef Tensor erf_(); public native @ByVal Tensor erfc(); @@ -647,21 +653,21 @@ public class Tensor extends TensorBase { public native @ByRef Tensor exp2_(); public native @ByVal Tensor expm1(); public native @ByRef Tensor expm1_(); - public native @ByVal Tensor expand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("bool") boolean implicit/*=false*/); - public native @ByVal Tensor expand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); + public native @ByVal Tensor expand(@ByVal LongArrayRef size, @Cast("bool") boolean implicit/*=false*/); + public native @ByVal Tensor expand(@ByVal LongArrayRef size); public native @ByVal Tensor expand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit/*=false*/); public native @ByVal Tensor expand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - public native @ByVal Tensor expand_symint(@ByVal SymIntRef size, @Cast("bool") boolean implicit/*=false*/); - public native @ByVal Tensor expand_symint(@ByVal SymIntRef size); + public native @ByVal Tensor expand_symint(@ByVal SymIntArrayRef size, @Cast("bool") boolean implicit/*=false*/); + public native @ByVal Tensor expand_symint(@ByVal SymIntArrayRef size); public native @ByVal Tensor expand_as(@Const @ByRef Tensor other); public native @ByVal Tensor flatten(@Cast("int64_t") long start_dim/*=0*/, @Cast("int64_t") long end_dim/*=-1*/); public native @ByVal Tensor flatten(); public native @ByVal Tensor flatten(@Cast("int64_t") long start_dim, @Cast("int64_t") long end_dim, @ByVal Dimname out_dim); public native @ByVal Tensor flatten(@ByVal Dimname start_dim, @ByVal Dimname end_dim, @ByVal Dimname out_dim); public native @ByVal Tensor flatten(@ByVal DimnameArrayRef dims, @ByVal Dimname out_dim); - public native @ByVal Tensor unflatten(@Cast("int64_t") long dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes); + public native @ByVal Tensor unflatten(@Cast("int64_t") long dim, @ByVal LongArrayRef sizes); public native @ByVal Tensor unflatten(@Cast("int64_t") long dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); - public native @ByVal Tensor unflatten(@ByVal Dimname dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal DimnameArrayRef names); + public native @ByVal Tensor unflatten(@ByVal Dimname dim, @ByVal LongArrayRef sizes, @ByVal DimnameArrayRef names); public native @ByVal Tensor unflatten(@ByVal Dimname dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal DimnameArrayRef names); public native @ByRef Tensor fill_(@Const @ByRef Scalar value); public native @ByRef Tensor fill_(@Const @ByRef Tensor value); @@ -677,10 +683,15 @@ public class Tensor extends TensorBase { public native @ByRef Tensor gcd_(@Const @ByRef Tensor other); public native @ByVal Tensor lcm(@Const @ByRef Tensor other); public native @ByRef Tensor lcm_(@Const @ByRef Tensor other); + public native @ByVal Tensor index(@Const @ByRef TensorOptionalList indices); public native @ByRef Tensor index_copy_(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); public native @ByVal Tensor index_copy(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); public native @ByRef Tensor index_copy_(@ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); public native @ByVal Tensor index_copy(@ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); + public native @ByRef Tensor index_put_(@Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values, @Cast("bool") boolean accumulate/*=false*/); + public native @ByRef Tensor index_put_(@Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values); + public native @ByVal Tensor index_put(@Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values, @Cast("bool") boolean accumulate/*=false*/); + public native @ByVal Tensor index_put(@Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values); public native @ByVal Tensor isclose(@Const @ByRef Tensor other, double rtol/*=1e-05*/, double atol/*=1e-08*/, @Cast("bool") boolean equal_nan/*=false*/); public native @ByVal Tensor isclose(@Const @ByRef Tensor other); public native @ByVal Tensor isnan(); @@ -696,10 +707,10 @@ public class Tensor extends TensorBase { public native @Cast("bool") boolean __dispatch_is_signed(); public native @Cast("bool") boolean __dispatch_is_inference(); public native @ByVal Tensor kron(@Const @ByRef Tensor other); - public native @ByVal TensorTensorTuple kthvalue(@Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple kthvalue(@Cast("int64_t") long k); - public native @ByVal TensorTensorTuple kthvalue(@Cast("int64_t") long k, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple kthvalue(@Cast("int64_t") long k, @ByVal Dimname dim); + public native @ByVal T_TensorTensor_T kthvalue(@Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T kthvalue(@Cast("int64_t") long k); + public native @ByVal T_TensorTensor_T kthvalue(@Cast("int64_t") long k, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T kthvalue(@Cast("int64_t") long k, @ByVal Dimname dim); public native @ByVal Tensor nan_to_num(@ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional nan, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional posinf, @ByVal(nullValue = 
"c10::optional(c10::nullopt)") DoubleOptional neginf); public native @ByVal Tensor nan_to_num(); public native @ByRef Tensor nan_to_num_(@ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional nan, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional posinf, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional neginf); @@ -726,8 +737,8 @@ public class Tensor extends TensorBase { public native @ByVal Tensor log_softmax(@ByVal Dimname dim); public native @ByVal Tensor logcumsumexp(@Cast("int64_t") long dim); public native @ByVal Tensor logcumsumexp(@ByVal Dimname dim); - public native @ByVal Tensor logsumexp(@ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal Tensor logsumexp(@ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); + public native @ByVal Tensor logsumexp(@ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal Tensor logsumexp(@ByVal LongArrayRef dim); public native @ByVal Tensor logsumexp(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor logsumexp(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); public native @ByVal Tensor logsumexp(@ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); @@ -735,13 +746,13 @@ public class Tensor extends TensorBase { public native @ByVal Tensor matmul(@Const @ByRef Tensor other); public native @ByVal Tensor matrix_power(@Cast("int64_t") long n); public native @ByVal Tensor matrix_exp(); - public native @ByVal TensorTensorTuple aminmax(@ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple aminmax(); - public native @ByVal TensorTensorTuple max(@Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple max(@Cast("int64_t") long dim); - public native @ByVal TensorTensorTuple max(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple max(@ByVal Dimname dim); - public native @ByVal Tensor amax(@ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T aminmax(@ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T aminmax(); + public native @ByVal T_TensorTensor_T max(@Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T max(@Cast("int64_t") long dim); + public native @ByVal T_TensorTensor_T max(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T max(@ByVal Dimname dim); + public native @ByVal Tensor amax(@ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor amax(); public native @ByVal Tensor amax(@ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor mean(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); @@ -756,27 +767,27 @@ public class Tensor extends TensorBase { public native @ByVal Tensor nanmean(); public native @ByVal Tensor nanmean(@ByVal(nullValue = 
"at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); public native @ByVal Tensor median(); - public native @ByVal TensorTensorTuple median(@Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple median(@Cast("int64_t") long dim); - public native @ByVal TensorTensorTuple median(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple median(@ByVal Dimname dim); + public native @ByVal T_TensorTensor_T median(@Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T median(@Cast("int64_t") long dim); + public native @ByVal T_TensorTensor_T median(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T median(@ByVal Dimname dim); public native @ByVal Tensor nanmedian(); - public native @ByVal TensorTensorTuple nanmedian(@Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple nanmedian(@Cast("int64_t") long dim); - public native @ByVal TensorTensorTuple nanmedian(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple nanmedian(@ByVal Dimname dim); - public native @ByVal TensorTensorTuple min(@Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple min(@Cast("int64_t") long dim); - public native @ByVal TensorTensorTuple min(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple min(@ByVal Dimname dim); - public native @ByVal Tensor amin(@ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T nanmedian(@Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T nanmedian(@Cast("int64_t") long dim); + public native @ByVal T_TensorTensor_T nanmedian(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T nanmedian(@ByVal Dimname dim); + public native @ByVal T_TensorTensor_T min(@Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T min(@Cast("int64_t") long dim); + public native @ByVal T_TensorTensor_T min(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T min(@ByVal Dimname dim); + public native @ByVal Tensor amin(@ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor amin(); public native @ByVal Tensor amin(@ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); public native @ByVal Tensor mm(@Const @ByRef Tensor mat2); - public native @ByVal TensorTensorTuple mode(@Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple mode(); - public native @ByVal TensorTensorTuple mode(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); - public native @ByVal TensorTensorTuple mode(@ByVal Dimname dim); + public native @ByVal T_TensorTensor_T mode(@Cast("int64_t") long dim/*=-1*/, @Cast("bool") 
boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T mode(); + public native @ByVal T_TensorTensor_T mode(@ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); + public native @ByVal T_TensorTensor_T mode(@ByVal Dimname dim); public native @ByVal Tensor mul(@Const @ByRef Tensor other); public native @ByRef Tensor mul_(@Const @ByRef Tensor other); public native @ByVal Tensor mul(@Const @ByRef Scalar other); @@ -794,12 +805,12 @@ public class Tensor extends TensorBase { public native @ByVal Tensor narrow_symint(@Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt length); public native @ByVal Tensor narrow(@Cast("int64_t") long dim, @Const @ByRef Tensor start, @Cast("int64_t") long length); public native @ByVal Tensor narrow_symint(@Cast("int64_t") long dim, @Const @ByRef Tensor start, @ByVal SymInt length); - public native @ByVal Tensor permute(@ByVal @Cast("c10::ArrayRef*") LongArrayRef dims); + public native @ByVal Tensor permute(@ByVal LongArrayRef dims); public native @ByVal Tensor permute(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); - public native @ByVal Tensor movedim(@ByVal @Cast("c10::ArrayRef*") LongArrayRef source, @ByVal @Cast("c10::ArrayRef*") LongArrayRef destination); + public native @ByVal Tensor movedim(@ByVal LongArrayRef source, @ByVal LongArrayRef destination); public native @ByVal Tensor movedim(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... destination); public native @ByVal Tensor movedim(@Cast("int64_t") long source, @Cast("int64_t") long destination); - public native @ByVal Tensor moveaxis(@ByVal @Cast("c10::ArrayRef*") LongArrayRef source, @ByVal @Cast("c10::ArrayRef*") LongArrayRef destination); + public native @ByVal Tensor moveaxis(@ByVal LongArrayRef source, @ByVal LongArrayRef destination); public native @ByVal Tensor moveaxis(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... destination); public native @ByVal Tensor moveaxis(@Cast("int64_t") long source, @Cast("int64_t") long destination); public native @ByVal Tensor numpy_T(); @@ -824,21 +835,21 @@ public class Tensor extends TensorBase { public native @ByRef Tensor neg_(); public native @ByVal Tensor negative(); public native @ByRef Tensor negative_(); - public native @ByVal Tensor repeat(@ByVal @Cast("c10::ArrayRef*") LongArrayRef repeats); + public native @ByVal Tensor repeat(@ByVal LongArrayRef repeats); public native @ByVal Tensor repeat(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
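The same simplification runs through the shape-manipulation overloads: the @Cast annotations on LongArrayRef parameters are dropped, while the generated long... varargs overloads remain the most convenient entry point from Java. A short sketch using only the signatures shown above, reusing the setup from the earlier example:

    Tensor t = rand(2, 3, 4);        // assumed factory, as before
    Tensor p = t.permute(2, 0, 1);   // long... overload; shape becomes [4, 2, 3]
    Tensor m = t.movedim(0, 2);      // move axis 0 to position 2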
@@ -824,21 +835,21 @@ public class Tensor extends TensorBase {
   public native @ByRef Tensor neg_();
   public native @ByVal Tensor negative();
   public native @ByRef Tensor negative_();
-  public native @ByVal Tensor repeat(@ByVal @Cast("c10::ArrayRef*") LongArrayRef repeats);
+  public native @ByVal Tensor repeat(@ByVal LongArrayRef repeats);
   public native @ByVal Tensor repeat(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... repeats);
-  public native @ByVal Tensor repeat_symint(@ByVal SymIntRef repeats);
+  public native @ByVal Tensor repeat_symint(@ByVal SymIntArrayRef repeats);
   public native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size);
   public native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats);
   public native @ByVal Tensor repeat_interleave(@Cast("int64_t") long repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size);
   public native @ByVal Tensor repeat_interleave(@Cast("int64_t") long repeats);
   public native @ByVal Tensor repeat_interleave_symint(@ByVal SymInt repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size);
   public native @ByVal Tensor repeat_interleave_symint(@ByVal SymInt repeats);
-  public native @ByVal Tensor reshape(@ByVal @Cast("c10::ArrayRef*") LongArrayRef shape);
+  public native @ByVal Tensor reshape(@ByVal LongArrayRef shape);
   public native @ByVal Tensor reshape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape);
-  public native @ByVal Tensor reshape_symint(@ByVal SymIntRef shape);
-  public native @ByVal Tensor _reshape_alias(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride);
+  public native @ByVal Tensor reshape_symint(@ByVal SymIntArrayRef shape);
+  public native @ByVal Tensor _reshape_alias(@ByVal LongArrayRef size, @ByVal LongArrayRef stride);
   public native @ByVal Tensor _reshape_alias(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride);
-  public native @ByVal Tensor _reshape_alias_symint(@ByVal SymIntRef size, @ByVal SymIntRef stride);
+  public native @ByVal Tensor _reshape_alias_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride);
   public native @ByVal Tensor reshape_as(@Const @ByRef Tensor other);
   public native @ByVal Tensor round();
   public native @ByRef Tensor round_();
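Alongside the @Cast cleanup, this hunk shows the symbolic-integer array type being renamed from SymIntRef to SymIntArrayRef across the *_symint variants. Ordinary callers can stay on the plain int64 overloads; a minimal sketch:

    Tensor t = rand(4, 4);
    Tensor r = t.reshape(2, 8);   // long... overload; reshape_symint now takes a SymIntArrayRef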
@@ -882,60 +893,60 @@ public class Tensor extends TensorBase {
   public native @ByVal Tensor select_scatter_symint(@Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal SymInt index);
   public native @ByVal Tensor diagonal_scatter(@Const @ByRef Tensor src, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/);
   public native @ByVal Tensor diagonal_scatter(@Const @ByRef Tensor src);
-  public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset);
-  public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride);
+  public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset);
+  public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride);
   public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset);
   public native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride);
-  public native @ByVal Tensor as_strided_scatter_symint(@Const @ByRef Tensor src, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset);
-  public native @ByVal Tensor as_strided_scatter_symint(@Const @ByRef Tensor src, @ByVal SymIntRef size, @ByVal SymIntRef stride);
+  public native @ByVal Tensor as_strided_scatter_symint(@Const @ByRef Tensor src, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset);
+  public native @ByVal Tensor as_strided_scatter_symint(@Const @ByRef Tensor src, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride);
   public native @ByVal Tensor smm(@Const @ByRef Tensor mat2);
   public native @ByVal Tensor softmax(@Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype);
   public native @ByVal Tensor softmax(@Cast("int64_t") long dim);
   public native @ByVal Tensor softmax(@ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype);
   public native @ByVal Tensor softmax(@ByVal Dimname dim);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split(@Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split(@Cast("int64_t") long split_size);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_symint(@ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_symint(@ByVal SymInt split_size);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Cast("int64_t") long split_size);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymInt split_size);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast("c10::ArrayRef*") LongArrayRef split_size, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast("c10::ArrayRef*") LongArrayRef split_size);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_size, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_size);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymIntRef split_size, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymIntRef split_size);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@ByVal SymIntRef split_sizes, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@ByVal SymIntRef split_sizes);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@ByVal SymIntRef split_sizes, @Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@ByVal SymIntRef split_sizes);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Cast("int64_t") long sections);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@ByVal @Cast("c10::ArrayRef*") LongArrayRef indices);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Cast("int64_t") long sections);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@ByVal @Cast("c10::ArrayRef*") LongArrayRef indices);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Cast("int64_t") long sections);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@ByVal @Cast("c10::ArrayRef*") LongArrayRef indices);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split(@Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split(@Cast("int64_t") long split_size);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_symint(@ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_symint(@ByVal SymInt split_size);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Cast("int64_t") long split_size);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymInt split_size);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal LongArrayRef split_size, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal LongArrayRef split_size);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_size, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_size);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymIntArrayRef split_size, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@ByVal SymIntArrayRef split_size);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal LongArrayRef split_sizes);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal LongArrayRef split_sizes);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@ByVal SymIntArrayRef split_sizes);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Cast("int64_t") long sections);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@ByVal LongArrayRef indices);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Cast("int64_t") long sections);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@ByVal LongArrayRef indices);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Cast("int64_t") long sections);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@ByVal LongArrayRef indices);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices);
   public native @ByVal Tensor squeeze();
   public native @ByVal Tensor squeeze(@Cast("int64_t") long dim);
   public native @ByVal Tensor squeeze(@ByVal Dimname dim);
-  public native @ByVal Tensor squeeze(@ByVal @Cast("c10::ArrayRef*") LongArrayRef dim);
+  public native @ByVal Tensor squeeze(@ByVal LongArrayRef dim);
   public native @ByVal Tensor squeeze(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim);
   public native @ByRef Tensor squeeze_();
   public native @ByRef Tensor squeeze_(@Cast("int64_t") long dim);
-  public native @ByRef Tensor squeeze_(@ByVal @Cast("c10::ArrayRef*") LongArrayRef dim);
+  public native @ByRef Tensor squeeze_(@ByVal LongArrayRef dim);
   public native @ByRef Tensor squeeze_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim);
   public native @ByRef Tensor squeeze_(@ByVal Dimname dim);
   public native @ByVal Tensor sspaddmm(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha);
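The split family returns the generated TensorVector adapter over std::vector. A minimal sketch of consuming it, assuming the adapter exposes the size()/get(long) accessors that JavaCPP's generated vector classes typically provide:

    Tensor t = rand(6, 4);
    TensorVector parts = t.split(2, 0);        // three chunks of shape [2, 4] along dim 0
    for (long i = 0; i < parts.size(); i++) {
        Tensor part = parts.get(i);            // assumed TensorVector accessors
    }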
@@ -956,7 +967,7 @@ public class Tensor extends TensorBase {
   public native @ByVal Tensor nansum(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype);
   public native @ByVal Tensor nansum();
   public native @ByVal Tensor nansum(@ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype);
-  public native @ByVal Tensor sum_to_size(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
+  public native @ByVal Tensor sum_to_size(@ByVal LongArrayRef size);
   public native @ByVal Tensor sum_to_size(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
   public native @ByVal Tensor sqrt();
   public native @ByRef Tensor sqrt_();
@@ -986,20 +997,20 @@ public class Tensor extends TensorBase {
   public native @ByRef Tensor tan_();
   public native @ByVal Tensor tanh();
   public native @ByRef Tensor tanh_();
-  public native @ByVal Tensor tile(@ByVal @Cast("c10::ArrayRef*") LongArrayRef dims);
+  public native @ByVal Tensor tile(@ByVal LongArrayRef dims);
   public native @ByVal Tensor tile(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims);
   public native @ByVal Tensor transpose(@Cast("int64_t") long dim0, @Cast("int64_t") long dim1);
   public native @ByVal Tensor transpose(@ByVal Dimname dim0, @ByVal Dimname dim1);
   public native @ByRef Tensor transpose_(@Cast("int64_t") long dim0, @Cast("int64_t") long dim1);
-  public native @ByVal Tensor flip(@ByVal @Cast("c10::ArrayRef*") LongArrayRef dims);
+  public native @ByVal Tensor flip(@ByVal LongArrayRef dims);
   public native @ByVal Tensor flip(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims);
   public native @ByVal Tensor fliplr();
   public native @ByVal Tensor flipud();
-  public native @ByVal Tensor roll(@ByVal @Cast("c10::ArrayRef*") LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef dims);
-  public native @ByVal Tensor roll(@ByVal @Cast("c10::ArrayRef*") LongArrayRef shifts);
+  public native @ByVal Tensor roll(@ByVal LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims);
+  public native @ByVal Tensor roll(@ByVal LongArrayRef shifts);
   public native @ByVal Tensor roll(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims);
   public native @ByVal Tensor roll(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shifts);
-  public native @ByVal Tensor rot90(@Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast("c10::ArrayRef*") LongArrayRef dims);
+  public native @ByVal Tensor rot90(@Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") LongArrayRef dims);
   public native @ByVal Tensor rot90();
   public native @ByVal Tensor rot90(@Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims);
   public native @ByVal Tensor _nested_tensor_size();
@@ -1030,16 +1041,16 @@ public class Tensor extends TensorBase {
   public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, ScalarType dtype);
   public native @ByVal Tensor norm(@Const @ByRef(nullValue = "at::Scalar(2)") Scalar p);
   public native @ByVal Tensor norm();
-  public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype);
+  public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype);
   public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype);
-  public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/);
-  public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim);
+  public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/);
+  public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim);
   public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/);
   public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim);
   public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype);
   public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/);
   public native @ByVal Tensor norm(@Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim);
-  public native @ByVal TensorTensorTuple frexp();
+  public native @ByVal T_TensorTensor_T frexp();
   public native @ByVal Tensor clone(@ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
   public native @ByVal Tensor clone();
   public native @ByVal Tensor positive();
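frexp is another two-tensor case picked up by the tuple rename: it decomposes a tensor into mantissa and exponent. A minimal sketch, under the same assumption about the tuple accessors:

    T_TensorTensor_T mf = t.frexp();   // mantissa and exponent as separate tensors
    Tensor mantissa = mf.get0();       // assumed accessors, as above
    Tensor exponent = mf.get1();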
@@ -1071,9 +1082,9 @@ public class Tensor extends TensorBase {
   public native @ByRef Tensor addmm_(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2);
   public native @ByVal Tensor _addmm_activation(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha, @Cast("bool") boolean use_gelu/*=false*/);
   public native @ByVal Tensor _addmm_activation(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2);
-  public native @Const @ByRef Tensor sparse_resize_(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim);
+  public native @Const @ByRef Tensor sparse_resize_(@ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim);
   public native @Const @ByRef Tensor sparse_resize_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim);
-  public native @Const @ByRef Tensor sparse_resize_and_clear_(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim);
+  public native @Const @ByRef Tensor sparse_resize_and_clear_(@ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim);
   public native @Const @ByRef Tensor sparse_resize_and_clear_(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim);
   public native @ByVal Tensor sparse_mask(@Const @ByRef Tensor mask);
   public native @ByVal Tensor to_dense(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype);
@@ -1096,9 +1107,9 @@ public class Tensor extends TensorBase {
   public native @ByVal Tensor col_indices();
   public native @ByVal Tensor ccol_indices();
   public native @ByVal Tensor row_indices();
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Cast("int64_t") long dim/*=0*/);
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind();
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@ByVal Dimname dim);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Cast("int64_t") long dim/*=0*/);
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind();
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@ByVal Dimname dim);
   public native @ByVal Tensor to_sparse(@Cast("int64_t") long sparse_dim);
   public native @ByVal Tensor to_sparse(@ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim);
   public native @ByVal Tensor to_sparse();
@@ -1107,12 +1118,12 @@ public class Tensor extends TensorBase {
   public native @ByVal Tensor to_sparse_csr();
   public native @ByVal Tensor to_sparse_csc(@ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim);
   public native @ByVal Tensor to_sparse_csc();
-  public native @ByVal Tensor to_sparse_bsr(@ByVal @Cast("c10::ArrayRef*") LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim);
-  public native @ByVal Tensor to_sparse_bsr(@ByVal @Cast("c10::ArrayRef*") LongArrayRef blocksize);
+  public native @ByVal Tensor to_sparse_bsr(@ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim);
+  public native @ByVal Tensor to_sparse_bsr(@ByVal LongArrayRef blocksize);
   public native @ByVal Tensor to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim);
   public native @ByVal Tensor to_sparse_bsr(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize);
-  public native @ByVal Tensor to_sparse_bsc(@ByVal @Cast("c10::ArrayRef*") LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim);
-  public native @ByVal Tensor to_sparse_bsc(@ByVal @Cast("c10::ArrayRef*") LongArrayRef blocksize);
+  public native @ByVal Tensor to_sparse_bsc(@ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim);
+  public native @ByVal Tensor to_sparse_bsc(@ByVal LongArrayRef blocksize);
   public native @ByVal Tensor to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim);
   public native @ByVal Tensor to_sparse_bsc(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize);
   public native @ByVal Tensor to_mkldnn(@ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype);
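The sparse conversion overloads follow the same pattern; the long... blocksize variant is the simplest to call from Java. A minimal sketch, shapes permitting:

    Tensor blocked = t.to_sparse_bsr(2, 2);   // long... overload; BSR layout with 2x2 blocks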
@@ -1138,18 +1149,18 @@ public class Tensor extends TensorBase {
   public native @ByVal Tensor to(@Const @ByRef Tensor other);
   public native @ByVal Scalar item();
   public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source);
-  public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride);
-  public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
+  public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride);
+  public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size);
   public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride);
   public native @ByRef Tensor set_(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
-  public native @ByRef Tensor set__symint(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntRef stride);
-  public native @ByRef Tensor set__symint(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntRef size);
-  public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride);
-  public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
+  public native @ByRef Tensor set__symint(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride);
+  public native @ByRef Tensor set__symint(@Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size);
+  public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride);
+  public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size);
   public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride);
   public native @ByRef Tensor set_(@Const @ByRef Tensor source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
-  public native @ByRef Tensor set__symint(@Const @ByRef Tensor source, @ByVal SymInt storage_offset, @ByVal SymIntRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntRef stride);
-  public native @ByRef Tensor set__symint(@Const @ByRef Tensor source, @ByVal SymInt storage_offset, @ByVal SymIntRef size);
+  public native @ByRef Tensor set__symint(@Const @ByRef Tensor source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride);
+  public native @ByRef Tensor set__symint(@Const @ByRef Tensor source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size);
   public native @ByRef Tensor set_(@Const @ByRef Tensor source);
   public native @ByRef Tensor set_();
   public native @Cast("bool") boolean is_set_to(@Const @ByRef Tensor tensor);
@@ -1159,9 +1170,9 @@ public class Tensor extends TensorBase {
   public native @ByVal Tensor masked_fill(@Const @ByRef Tensor mask, @Const @ByRef Tensor value);
   public native @ByRef Tensor masked_scatter_(@Const @ByRef Tensor mask, @Const @ByRef Tensor source);
   public native @ByVal Tensor masked_scatter(@Const @ByRef Tensor mask, @Const @ByRef Tensor source);
-  public native @ByVal Tensor view(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
+  public native @ByVal Tensor view(@ByVal LongArrayRef size);
   public native @ByVal Tensor view(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
-  public native @ByVal Tensor view_symint(@ByVal SymIntRef size);
+  public native @ByVal Tensor view_symint(@ByVal SymIntArrayRef size);
   public native @ByVal Tensor view(ScalarType dtype);
   public native @ByRef Tensor put_(@Const @ByRef Tensor index, @Const @ByRef Tensor source, @Cast("bool") boolean accumulate/*=false*/);
   public native @ByRef Tensor put_(@Const @ByRef Tensor index, @Const @ByRef Tensor source);
@@ -1329,7 +1340,7 @@ public class Tensor extends TensorBase {
   public native @ByVal Tensor index_select(@ByVal Dimname dim, @Const @ByRef Tensor index);
   public native @ByVal Tensor masked_select(@Const @ByRef Tensor mask);
   public native @ByVal Tensor nonzero();
-  public native @Cast({"", "std::vector"}) @StdMove TensorVector nonzero_numpy();
+  public native @Cast({"", "std::vector"}) @StdMove TensorVector nonzero_numpy();
   public native @ByVal Tensor argwhere();
   public native @ByVal Tensor gather(@Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/);
   public native @ByVal Tensor gather(@Cast("int64_t") long dim, @Const @ByRef Tensor index);
@@ -1343,10 +1354,10 @@ public class Tensor extends TensorBase {
   public native @ByVal Tensor addcdiv(@Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2);
   public native @ByRef Tensor addcdiv_(@Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value);
   public native @ByRef Tensor addcdiv_(@Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2);
-  public native @ByVal TensorTensorTuple triangular_solve(@Const @ByRef Tensor A, @Cast("bool") boolean upper/*=true*/, @Cast("bool") boolean transpose/*=false*/, @Cast("bool") boolean unitriangular/*=false*/);
-  public native @ByVal TensorTensorTuple triangular_solve(@Const @ByRef Tensor A);
-  public native @ByVal TensorTensorTensorTuple svd(@Cast("bool") boolean some/*=true*/, @Cast("bool") boolean compute_uv/*=true*/);
-  public native @ByVal TensorTensorTensorTuple svd();
+  public native @ByVal T_TensorTensor_T triangular_solve(@Const @ByRef Tensor A, @Cast("bool") boolean upper/*=true*/, @Cast("bool") boolean transpose/*=false*/, @Cast("bool") boolean unitriangular/*=false*/);
+  public native @ByVal T_TensorTensor_T triangular_solve(@Const @ByRef Tensor A);
+  public native @ByVal T_TensorTensorTensor_T svd(@Cast("bool") boolean some/*=true*/, @Cast("bool") boolean compute_uv/*=true*/);
+  public native @ByVal T_TensorTensorTensor_T svd();
   public native @ByVal Tensor swapaxes(@Cast("int64_t") long axis0, @Cast("int64_t") long axis1);
   public native @ByRef Tensor swapaxes_(@Cast("int64_t") long axis0, @Cast("int64_t") long axis1);
   public native @ByVal Tensor swapdims(@Cast("int64_t") long dim0, @Cast("int64_t") long dim1);
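Three-output operations get the analogous T_TensorTensorTensor_T wrapper. A minimal sketch for svd, assuming get0()/get1()/get2() accessors in the same style as the two-tensor wrapper:

    T_TensorTensorTensor_T usv = t.svd(true, true);   // some = true, compute_uv = true
    Tensor u = usv.get0();                            // assumed accessor names
    Tensor s = usv.get1();
    Tensor v = usv.get2();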
@Cast("bool") boolean left/*=true*/, @Cast("bool") boolean transpose/*=false*/); public native @ByVal Tensor ormqr(@Const @ByRef Tensor input2, @Const @ByRef Tensor input3); @@ -1388,10 +1399,10 @@ public class Tensor extends TensorBase { public native @ByVal Tensor lerp(@Const @ByRef Tensor end, @Const @ByRef Tensor weight); public native @ByVal Tensor histc(@Cast("int64_t") long bins/*=100*/, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar min, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar max); public native @ByVal Tensor histc(); - public native @ByVal TensorTensorTuple histogram(@Const @ByRef Tensor bins, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); - public native @ByVal TensorTensorTuple histogram(@Const @ByRef Tensor bins); - public native @ByVal TensorTensorTuple histogram(@Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); - public native @ByVal TensorTensorTuple histogram(); + public native @ByVal T_TensorTensor_T histogram(@Const @ByRef Tensor bins, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); + public native @ByVal T_TensorTensor_T histogram(@Const @ByRef Tensor bins); + public native @ByVal T_TensorTensor_T histogram(@Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); + public native @ByVal T_TensorTensor_T histogram(); public native @ByVal Tensor fmod(@Const @ByRef Scalar other); public native @ByRef Tensor fmod_(@Const @ByRef Scalar other); public native @ByVal Tensor fmod(@Const @ByRef Tensor other); @@ -1424,14 +1435,14 @@ public class Tensor extends TensorBase { public native @ByVal Tensor nanquantile(@Const @ByRef Tensor q); public native @ByVal Tensor nanquantile(double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); public native @ByVal Tensor nanquantile(double q); - public native @ByVal TensorTensorTuple sort(@Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); - public native @ByVal TensorTensorTuple sort(); - public native @ByVal TensorTensorTuple sort(@ByVal BoolOptional stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); - public native @ByVal TensorTensorTuple sort(@ByVal BoolOptional stable); - public native @ByVal TensorTensorTuple sort(@ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); - public native @ByVal TensorTensorTuple sort(@ByVal Dimname dim); - public native @ByVal TensorTensorTuple sort(@ByVal BoolOptional stable, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); - public native @ByVal TensorTensorTuple sort(@ByVal BoolOptional stable, @ByVal Dimname dim); + public native @ByVal T_TensorTensor_T sort(@Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); + public native @ByVal T_TensorTensor_T sort(); + public native @ByVal T_TensorTensor_T sort(@ByVal BoolOptional stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); + public native @ByVal T_TensorTensor_T 
sort(@ByVal BoolOptional stable); + public native @ByVal T_TensorTensor_T sort(@ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); + public native @ByVal T_TensorTensor_T sort(@ByVal Dimname dim); + public native @ByVal T_TensorTensor_T sort(@ByVal BoolOptional stable, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); + public native @ByVal T_TensorTensor_T sort(@ByVal BoolOptional stable, @ByVal Dimname dim); public native @ByVal Tensor msort(); public native @ByVal Tensor argsort(@Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); public native @ByVal Tensor argsort(); @@ -1439,8 +1450,8 @@ public class Tensor extends TensorBase { public native @ByVal Tensor argsort(@Cast("bool") boolean stable); public native @ByVal Tensor argsort(@ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); public native @ByVal Tensor argsort(@ByVal Dimname dim); - public native @ByVal TensorTensorTuple topk(@Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); - public native @ByVal TensorTensorTuple topk(@Cast("int64_t") long k); + public native @ByVal T_TensorTensor_T topk(@Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); + public native @ByVal T_TensorTensor_T topk(@Cast("int64_t") long k); public native @ByVal Tensor all(); public native @ByVal Tensor any(); public native @ByVal Tensor renorm(@Const @ByRef Scalar p, @Cast("int64_t") long dim, @Const @ByRef Scalar maxnorm); @@ -1464,7 +1475,7 @@ public class Tensor extends TensorBase { public native @ByVal Tensor isposinf(); public native @ByVal Tensor isneginf(); public native @ByVal Tensor det(); - public native @ByVal TensorTensorTuple slogdet(); + public native @ByVal T_TensorTensor_T slogdet(); public native @ByVal Tensor logdet(); public native @ByVal Tensor inverse(); public native @ByVal Tensor inner(@Const @ByRef Tensor other); @@ -1562,7 +1573,7 @@ public class Tensor extends TensorBase { public native @ByVal Tensor data(); - public native void _backward(@ByVal TensorArrayRef inputs, @Const @ByRef TensorOptional gradient, @ByVal BoolOptional keep_graph, @Cast("bool") boolean create_graph); + public native void _backward(@ByVal @Cast("at::TensorList*") TensorArrayRef inputs, @Const @ByRef TensorOptional gradient, @ByVal BoolOptional keep_graph, @Cast("bool") boolean create_graph); public native @Const @ByRef Tensor requires_grad_(@Cast("bool") boolean _requires_grad/*=true*/); public native @Const @ByRef Tensor requires_grad_(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArg.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArg.java index 82b03e2c31e..cec8c7a3179 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArg.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArg.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArgArrayRef.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArgArrayRef.java index d5bb06866dd..4f8851fcea6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArgArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArgArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class TensorArgArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -39,8 +41,7 @@ public class TensorArgArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public TensorArgArrayRef(@Const @ByRef TensorArg OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef TensorArg OneElt); + /** Construct an ArrayRef from a pointer and length. */ public TensorArgArrayRef(@Const TensorArg data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -70,13 +71,13 @@ public class TensorArgArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") TensorArg begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") TensorArg end(); + public native @Const @ByPtr TensorArg begin(); + public native @Const @ByPtr TensorArg end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") TensorArg cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") TensorArg cend(); + public native @Const @ByPtr TensorArg cbegin(); + public native @Const @ByPtr TensorArg cend(); /** empty - Check if the array is empty. 
*/ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java index 3560b4e2c5c..d6c100a483d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("c10::ArrayRef") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class TensorArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -39,8 +41,7 @@ public class TensorArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public TensorArrayRef(@Const @ByRef Tensor OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef Tensor OneElt); + /** Construct an ArrayRef from a pointer and length. */ public TensorArrayRef(@Const Tensor data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -58,8 +59,8 @@ public class TensorArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. - public TensorArrayRef(@ByRef TensorVector Vec) { super((Pointer)null); allocate(Vec); } - private native void allocate(@ByRef TensorVector Vec); + public TensorArrayRef(@ByRef TensorVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef TensorVector vec); /** Construct an ArrayRef from a std::array */ @@ -72,13 +73,13 @@ public class TensorArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Tensor begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Tensor end(); + public native @Const @ByPtr Tensor begin(); + public native @Const @ByPtr Tensor end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Tensor cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Tensor cend(); + public native @Const @ByPtr Tensor cbegin(); + public native @Const @ByPtr Tensor cend(); /** empty - Check if the array is empty. 
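With the single-element constructor removed, a TensorArrayRef is built from a pointer plus length or from a TensorVector, and begin()/end() now surface as plain element pointers rather than cast iterators. A minimal sketch, assuming the generated TensorVector offers a varargs constructor as JavaCPP vector adapters usually do:

    TensorVector vec = new TensorVector(rand(2), rand(2));   // assumed varargs constructor
    TensorArrayRef ref = new TensorArrayRef(vec);            // constructor shown in the hunk above
    Tensor first = ref.begin();                              // @ByPtr: points at the first element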
     /** empty - Check if the array is empty. */
     public native @Cast("const bool") boolean empty();
@@ -128,7 +129,7 @@ public class TensorArrayRef extends Pointer {
     /** \}
      * \name Expensive Operations
      * \{ */
-    public native @Cast({"", "std::vector"}) @StdMove TensorVector vec();
+    public native @Cast({"", "std::vector"}) @StdMove TensorVector vec();
     /** \} */
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRefOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRefOptional.java
new file mode 100644
index 00000000000..54ee280728d
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorArrayRefOptional.java
@@ -0,0 +1,35 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@NoOffset @Name("c10::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TensorArrayRefOptional extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public TensorArrayRefOptional(Pointer p) { super(p); }
+    public TensorArrayRefOptional(TensorArrayRef value) { this(); put(value); }
+    public TensorArrayRefOptional() { allocate(); }
+    private native void allocate();
+    public native @Name("operator =") @ByRef TensorArrayRefOptional put(@ByRef TensorArrayRefOptional x);
+
+    public native boolean has_value();
+    public native void reset();
+    public native @Name("value") @ByRef TensorArrayRef get();
+    @ValueSetter public native TensorArrayRefOptional put(@ByRef TensorArrayRef value);
+}
+
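The new optional wrapper uses only the methods declared in the file above, so its usage is straightforward:

    TensorArrayRefOptional opt = new TensorArrayRefOptional();   // empty optional
    if (!opt.has_value()) opt.put(ref);                          // wrap an existing TensorArrayRef
    TensorArrayRef inner = opt.get();                            // value()
    opt.reset();                                                 // back to empty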
*/ @@ -56,29 +58,20 @@ public class TensorBase extends AbstractTensor { return new TensorBase((Pointer)this).offsetAddress(i); } - public static class unsafe_borrow_t extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public unsafe_borrow_t(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public unsafe_borrow_t(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public unsafe_borrow_t position(long position) { - return (unsafe_borrow_t)super.position(position); - } - @Override public unsafe_borrow_t getPointer(long i) { - return new unsafe_borrow_t((Pointer)this).offsetAddress(i); - } - public unsafe_borrow_t() { super((Pointer)null); allocate(); } -private native void allocate(); } public TensorBase() { super((Pointer)null); allocate(); } private native void allocate(); // This constructor should not be used by end users and is an implementation // detail invoked by autogenerated code. + public TensorBase( + @ByVal TensorImplPtr tensor_impl) { super((Pointer)null); allocate(tensor_impl); } + private native void allocate( + @ByVal TensorImplPtr tensor_impl); public TensorBase(@Const @ByRef TensorBase arg0) { super((Pointer)null); allocate(arg0); } private native void allocate(@Const @ByRef TensorBase arg0); // Creates a new wrapper from TensorImpl. Intentionally a free method because // it should be used with care. Checks necessary invariants + public static native @ByVal TensorBase wrap_tensor_impl( + @ByVal TensorImplPtr tensor_impl); public native @Cast("int64_t") long dim(); public native @Cast("int64_t") long storage_offset(); @@ -121,6 +114,9 @@ public static class unsafe_borrow_t extends Pointer { public native TensorImpl unsafeGetTensorImpl(); public native TensorImpl unsafeReleaseTensorImpl(); + public native @Const @ByRef TensorImplPtr getIntrusivePtr(); + + public native @ByVal TensorImplPtr unsafeReleaseIntrusivePtr(); public native @Cast("bool") boolean defined(); @@ -138,10 +134,10 @@ public static class unsafe_borrow_t extends Pointer { public native @StdString String toString(); - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes(); - public native @ByVal SymIntRef sym_sizes(); - public native @ByVal SymIntRef sym_strides(); - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides(); + public native @ByVal LongArrayRef sizes(); + public native @ByVal SymIntArrayRef sym_sizes(); + public native @ByVal SymIntArrayRef sym_strides(); + public native @ByVal LongArrayRef strides(); // See impl::get_opt_names in ATen/NamedTensor.h for docs. public native @ByVal DimnameListOptional opt_names(); // See impl::get_names in ATen/NamedTensor.h for docs. 
@@ -280,6 +276,7 @@ public static class unsafe_borrow_t extends Pointer { /** If a tensor is a quantized tensor, returns its quantizer * TODO: it's not in native_functions.yaml yet as it's not exposed to python */ + public native @ByVal QuantizerPtr quantizer(); /** Returns if a {@code Tensor} has any dimension names */ public native @Cast("bool") boolean has_names(); @@ -294,7 +291,7 @@ public static class unsafe_borrow_t extends Pointer { public native Pointer data_ptr(); - public native @Name("data_ptr") BytePointer data_ptr_byte(); + public native @Name("data_ptr") BytePointer data_ptr_char(); public native @Name("data_ptr") ShortPointer data_ptr_short(); @@ -424,15 +421,13 @@ public static class unsafe_borrow_t extends Pointer { // users who should use the API provided in torch/csrc/autograd.h /** This function returns the forward gradient for this Tensor at the given level. */ - public native @Const @ByRef Tensor _fw_grad(@Cast("uint64_t") long level); + /** This function can be used to set the value of the forward grad. * Note that the given new_grad might not be used directly if it has different * metadata (size/stride/storage offset) compared to this Tensor. In that case, * new_grad content will be copied into a new Tensor */ - /// - public native void _set_fw_grad(@Const @ByRef TensorBase new_grad, @Cast("uint64_t") long level, @Cast("bool") boolean is_inplace_op); /** NOTE: This is similar to the legacy {@code .data()} function on {@code Variable}, and is intended * to be used from functions that need to access the {@code Variable}'s equivalent {@code Tensor} @@ -510,6 +505,8 @@ public static class unsafe_borrow_t extends Pointer { * std::cout << v.grad() << std::endl; * v.remove_hook(h); // removes the hook * } */ + public native @Name("register_hook >") int register_hook(@ByRef(true) VoidTensorHook hook); + public native @Name("register_hook >") int register_hook(@ByRef(true) TensorTensorHook hook); /** Remove hook at given position */ public native void remove_hook(@Cast("unsigned") int pos); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java new file mode 100644 index 00000000000..fbeb7149dcc --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBaseMaybeOwned.java @@ -0,0 +1,65 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::MaybeOwned") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorBaseMaybeOwned extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorBaseMaybeOwned(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
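The two register_hook overloads above bind the C++ std::function hooks to Java functor classes from the new org.bytedeco.pytorch.functions package. A minimal sketch, assuming those functors follow the usual JavaCPP pattern of subclassing and overriding a call(Tensor) method, and assuming set_requires_grad is generated from the C++ TensorBase API; neither is shown in this hunk:

    import org.bytedeco.pytorch.*;
    import org.bytedeco.pytorch.functions.VoidTensorHook;
    import static org.bytedeco.pytorch.global.torch.*;

    public class HookSketch {
        public static void main(String[] args) {
            Tensor v = ones(3);          // assumed factory
            v.set_requires_grad(true);   // assumed mapping; autograd must track v

            VoidTensorHook hook = new VoidTensorHook() {
                @Override public void call(Tensor grad) { // assumed functor entry point
                    System.out.println("grad defined: " + grad.defined());
                }
            };
            int pos = v.register_hook(hook);
            // ... a backward pass here would fire the hook ...
            v.remove_hook(pos); // removes the hook again
        }
    }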
*/ + public TensorBaseMaybeOwned(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public TensorBaseMaybeOwned position(long position) { + return (TensorBaseMaybeOwned)super.position(position); + } + @Override public TensorBaseMaybeOwned getPointer(long i) { + return new TensorBaseMaybeOwned((Pointer)this).offsetAddress(i); + } + + public TensorBaseMaybeOwned() { super((Pointer)null); allocate(); } + private native void allocate(); + + // Copying a borrow yields another borrow of the original, as with a + // T*. Copying an owned T yields another owned T for safety: no + // chains of borrowing by default! (Note you could get that behavior + // with MaybeOwned::borrowed(*rhs) if you wanted it.) + public TensorBaseMaybeOwned(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned rhs) { super((Pointer)null); allocate(rhs); } + private native void allocate(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned rhs); + + public native @ByRef @Name("operator =") TensorBaseMaybeOwned put(@Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned rhs); + + public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned borrowed(@Const @ByRef TensorBase t); + + public static native @NoException(true) @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorBaseMaybeOwned owned(@ByRef(true) TensorBase t); + + // This is an implementation detail! You should know what you're doing + // if you are testing this. If you just want to guarantee ownership move + // this into a T + public native @Cast("bool") boolean unsafeIsBorrowed(); + + public native @Const @ByRef @Name("operator *") TensorBase multiply(); + + public native @Const @Name("operator ->") TensorBase access(); + + // If borrowed, copy the underlying T. If owned, move from + // it. borrowed/owned state remains the same, and either we + // reference the same borrow as before or we are an owned moved-from + // T. 
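In use, the borrowed/owned factories behave as the comments above describe. A minimal sketch (ones() assumed); note that owned() consumes its argument, so a copy is passed:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class MaybeOwnedSketch {
        public static void main(String[] args) {
            Tensor t = ones(4);

            // Borrow: no refcount bump; t must outlive the borrow.
            TensorBaseMaybeOwned borrow = TensorBaseMaybeOwned.borrowed(t);
            System.out.println(borrow.unsafeIsBorrowed()); // true

            // Own: moves a fresh TensorBase copy into the wrapper.
            TensorBaseMaybeOwned own = TensorBaseMaybeOwned.owned(new TensorBase(t));
            System.out.println(own.unsafeIsBorrowed());    // false

            // operator* is mapped to multiply(), operator-> to access().
            System.out.println("dim = " + borrow.multiply().dim());
        }
    }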
+ +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorCastValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorCastValue.java index 98daa4d58be..05890e29b85 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorCastValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorCastValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -27,12 +29,7 @@ public class TensorCastValue extends SugaredValue { public native @StdString BytePointer kind(); - public native @SharedPtr @ByVal SugaredValue call( - @Const @ByRef SourceRange loc, - @ByRef GraphFunction m, - @ByVal NamedValueArrayRef args, - @ByVal NamedValueArrayRef kwargs, - @Cast("size_t") long n_binders); + public native ScalarType dtype_(); public native TensorCastValue dtype_(ScalarType setter); public native @ByRef NamedValue self_(); public native TensorCastValue self_(NamedValue setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java deleted file mode 100644 index bf4aed5fdc9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDataset.java +++ /dev/null @@ -1,41 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A dataset of tensors. - * Stores a single tensor internally, which is then indexed inside {@code get()}. */ -@Namespace("torch::data::datasets") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TensorDataset extends TensorExampleDataset { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorDataset(Pointer p) { super(p); } - - /** Creates a {@code TensorDataset} from a vector of tensors. */ - public TensorDataset(@Cast({"", "std::vector"}) @StdMove TensorVector tensors) { super((Pointer)null); allocate(tensors); } - private native void allocate(@Cast({"", "std::vector"}) @StdMove TensorVector tensors); - - public TensorDataset(@ByVal Tensor tensor) { super((Pointer)null); allocate(tensor); } - private native void allocate(@ByVal Tensor tensor); - - /** Returns a single {@code TensorExample}. */ - public native @ByVal @Cast("torch::data::TensorExample*") Example get(@Cast("size_t") long index); - - /** Returns the number of tensors in the dataset. 
*/ - public native @ByVal SizeTOptional size(); - - public native @ByRef Tensor tensor(); public native TensorDataset tensor(Tensor setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDeque.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDeque.java index 66db13c3ff3..41ae88563f6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDeque.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorDeque.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("std::deque") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("std::deque") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class TensorDeque extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -33,6 +35,8 @@ public class TensorDeque extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public Tensor front() { return get(0); } + public Tensor back() { return get(size() - 1); } @Index(function = "at") public native @ByRef Tensor get(@Cast("size_t") long i); public native TensorDeque put(@Cast("size_t") long i, Tensor value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorElementReference.java new file mode 100644 index 00000000000..8834ac5b11f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorElementReference.java @@ -0,0 +1,42 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::impl::ListElementReference") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorElementReference extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public TensorElementReference(Pointer p) { super(p); } + + public native @Name("operator std::conditional_t<std::is_reference<typename c10::detail::ivalue_to_const_ref_overload_return<at::Tensor>::type>::value,const at::Tensor&,at::Tensor>") @ByVal Tensor getTensor(); + + + + + + // assigning another ref to this assigns the underlying value + + + public native @Const @ByRef IValue get(); + + + + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExample.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExample.java index 23ed026199c..13ae3f16654 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExample.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExample.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleBatchDataset.java index a04e53f5e12..8ddcc7b1c2c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleBatchDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleDataset.java index bc0cff6ccb5..7b0fa945b52 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleDataset.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVector.java index 8aabd20a733..589a34feb48 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorExampleVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@
-33,6 +35,8 @@ public class TensorExampleVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public TensorExample front() { return get(0); } + public TensorExample back() { return get(size() - 1); } @Index(function = "at") public native @ByRef TensorExample get(@Cast("size_t") long i); public native TensorExampleVector put(@Cast("size_t") long i, TensorExample value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java index 42a6774e70f..346a18a4343 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometry.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,8 +36,8 @@ public class TensorGeometry extends Pointer { public TensorGeometry() { super((Pointer)null); allocate(); } private native void allocate(); - public TensorGeometry(@ByVal SymIntRef sizes) { super((Pointer)null); allocate(sizes); } - private native void allocate(@ByVal SymIntRef sizes); + public TensorGeometry(@ByVal SymIntArrayRef sizes) { super((Pointer)null); allocate(sizes); } + private native void allocate(@ByVal SymIntArrayRef sizes); public TensorGeometry(@Const @ByRef TensorBase t) { super((Pointer)null); allocate(t); } private native void allocate(@Const @ByRef TensorBase t); @@ -46,16 +48,16 @@ public class TensorGeometry extends Pointer { public native @Cast("int64_t") long dim(); public native @Cast("int64_t") long size(@Cast("int64_t") long dim); - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes(); + public native @ByVal LongArrayRef sizes(); public native @Cast("int64_t") long stride(@Cast("int64_t") long dim); - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides(); + public native @ByVal LongArrayRef strides(); public native @Cast("int64_t") long storage_offset(); public native @Cast("int64_t") long numel(); public native @ByVal SymInt sym_size(@Cast("int64_t") long dim); - public native @ByVal SymIntRef sym_sizes(); + public native @ByVal SymIntArrayRef sym_sizes(); public native @ByVal SymInt sym_stride(@Cast("int64_t") long dim); - public native @ByVal SymIntRef sym_strides(); + public native @ByVal SymIntArrayRef sym_strides(); public native @ByVal SymInt sym_storage_offset(); public native @ByVal SymInt sym_numel(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometryArg.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometryArg.java index 4351bddbe46..9433eb2f90e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometryArg.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorGeometryArg.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; 
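The SymIntRef to SymIntArrayRef rename and the dropped @Cast annotations on TensorGeometry are visible in a sketch like the following (ones() assumed; everything else uses only accessors shown above):

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class TensorGeometrySketch {
        public static void main(String[] args) {
            Tensor t = ones(2, 3);
            TensorGeometry g = new TensorGeometry(t); // from const TensorBase&
            System.out.println("dim = " + g.dim() + ", numel = " + g.numel());
            System.out.println("size(1) = " + g.size(1) + ", stride(0) = " + g.stride(0));

            LongArrayRef sizes = g.sizes();              // plain LongArrayRef, no cast
            SymIntArrayRef symStrides = g.sym_strides(); // formerly SymIntRef
        }
    }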
import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java index 833c154b6d7..ffed5f6f1d5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -211,22 +213,61 @@ private native void allocate( * Return a reference to the sizes of this tensor. This reference remains * valid as long as the tensor is live and not resized. */ - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes(); + public native @ByVal LongArrayRef sizes(); - public native @ByVal SymIntRef sym_sizes(); + public native @ByVal SymIntArrayRef sym_sizes(); - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes_default(); + public native @ByVal LongArrayRef sizes_default(); - public native @ByVal SymIntRef sym_sizes_default(); + public native @ByVal SymIntArrayRef sym_sizes_default(); // From https://stackoverflow.com/a/3057522/23845 // TODO: does C++14 have a stdlib template for this? + @Name("identity") public static class SymIntIdentity extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public SymIntIdentity() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public SymIntIdentity(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SymIntIdentity(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public SymIntIdentity position(long position) { + return (SymIntIdentity)super.position(position); + } + @Override public SymIntIdentity getPointer(long i) { + return new SymIntIdentity((Pointer)this).offsetAddress(i); + } + + } + @Name("identity") public static class LongIdentity extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public LongIdentity() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public LongIdentity(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public LongIdentity(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public LongIdentity position(long position) { + return (LongIdentity)super.position(position); + } + @Override public LongIdentity getPointer(long i) { + return new LongIdentity((Pointer)this).offsetAddress(i); + } + + } - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef _generic_sizes(@ByVal @Cast("c10::TensorImpl::identity*") Pointer arg0); + public native @ByVal LongArrayRef _generic_sizes(@ByVal LongIdentity arg0); + public native @ByVal SymIntArrayRef _generic_sizes(@ByVal SymIntIdentity arg0); - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef _generic_strides(@ByVal @Cast("c10::TensorImpl::identity*") Pointer arg0); + public native @ByVal LongArrayRef _generic_strides(@ByVal LongIdentity arg0); + public native @ByVal SymIntArrayRef _generic_strides(@ByVal SymIntIdentity arg0); - public native @Cast("int64_t") long _generic_storage_offset(@ByVal @Cast("c10::TensorImpl::identity*") Pointer arg0); + public native @Cast("int64_t") long _generic_storage_offset(@ByVal LongIdentity arg0); + public native @ByVal SymInt _generic_storage_offset(@ByVal SymIntIdentity arg0); /** * The number of elements in a tensor. @@ -271,13 +312,13 @@ private native void allocate( * Return a reference to the strides of this tensor. This reference remains * valid as long as the tensor is live and not restrided. */ - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides(); + public native @ByVal LongArrayRef strides(); - public native @ByVal SymIntRef sym_strides(); + public native @ByVal SymIntArrayRef sym_strides(); - public native @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides_default(); + public native @ByVal LongArrayRef strides_default(); - public native @ByVal SymIntRef sym_strides_default(); + public native @ByVal SymIntArrayRef sym_strides_default(); /** * Whether or not a tensor is laid out in contiguous memory. @@ -633,15 +674,15 @@ public native void _set_fw_grad( // if we are going to use sym sizes, we should be setting sym strides at the // same time, otherwise it's very easy to misuse this API public native void set_sizes_and_strides( - @ByVal SymIntRef sizes, - @ByVal SymIntRef strides, + @ByVal SymIntArrayRef sizes, + @ByVal SymIntArrayRef strides, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); public native void set_sizes_and_strides( - @ByVal SymIntRef sizes, - @ByVal SymIntRef strides); + @ByVal SymIntArrayRef sizes, + @ByVal SymIntArrayRef strides); // This is renamed to avoid breaking overload BC - public native void generic_set_sizes_contiguous(@ByVal SymIntRef sizes); - public native void generic_set_sizes_contiguous(@ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes); + public native void generic_set_sizes_contiguous(@ByVal SymIntArrayRef sizes); + public native void generic_set_sizes_contiguous(@ByVal LongArrayRef sizes); public native void generic_set_sizes_contiguous(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); /** @@ -678,7 +719,7 @@ public native void set_sizes_and_strides( * sizes/strides are in bounds for the storage that is allocated; * this is the responsibility of the caller */ - public native void set_sizes_contiguous(@ByVal @Cast("c10::ArrayRef*") LongArrayRef new_size); + public native void set_sizes_contiguous(@ByVal LongArrayRef new_size); public native void set_sizes_contiguous(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... new_size); /** @@ -689,12 +730,12 @@ public native void set_sizes_and_strides( * this is the responsibility of the caller */ public native void set_sizes_and_strides( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef new_size, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef new_stride, + @ByVal LongArrayRef new_size, + @ByVal LongArrayRef new_stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); public native void set_sizes_and_strides( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef new_size, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef new_stride); + @ByVal LongArrayRef new_size, + @ByVal LongArrayRef new_stride); public native void set_sizes_and_strides( @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] new_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] new_stride, @@ -793,7 +834,7 @@ public native void set_named_tensor_meta( * compatible with SparseCUDA. */ public native @Cast("bool") boolean has_compatible_shallow_copy_type(@ByVal DispatchKeySet from); - public native @ByVal @Cast("c10::intrusive_ptr*") Pointer shallow_copy_and_detach( + public native @ByVal TensorImplPtr shallow_copy_and_detach( @Const @ByRef VariableVersion version_counter, @Cast("bool") boolean allow_tensor_metadata_change); @@ -810,7 +851,7 @@ public native void set_named_tensor_meta( * For why this function doesn't check this TensorImpl's * {@code allow_tensor_metadata_change_}, see NOTE [ TensorImpl Shallow-Copying ]. */ - public native void shallow_copy_from(@Cast("const c10::intrusive_ptr*") @ByRef Pointer impl); + public native void shallow_copy_from(@Const @ByRef TensorImplPtr impl); // Inference tensor doesn't have version counter, // set_version_counter is no-op for them. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplPtr.java new file mode 100644 index 00000000000..7befe17ef9f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplPtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorImplPtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorImplPtr(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public TensorImplPtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public TensorImplPtr position(long position) { + return (TensorImplPtr)super.position(position); + } + @Override public TensorImplPtr getPointer(long i) { + return new TensorImplPtr((Pointer)this).offsetAddress(i); + } + + + public TensorImplPtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public TensorImplPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. + // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public TensorImplPtr(TensorImpl target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(TensorImpl target, @ByVal DontIncreaseRefcount arg1); + + + + public TensorImplPtr(@ByRef(true) TensorImplPtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) TensorImplPtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) TensorImplPtr put(@ByRef(true) TensorImplPtr rhs); + + public native @NoException(true) TensorImpl get(); + + public native @ByRef @Name("operator *") @NoException(true) TensorImpl multiply(); + + public native @Name("operator ->") @NoException(true) TensorImpl access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef TensorImplPtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into an intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) TensorImpl release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal TensorImplPtr reclaim(TensorImpl owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal TensorImplPtr reclaim_copy(TensorImpl owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside an intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) into an intrusive_ptr.
If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal TensorImplPtr unsafe_steal_from_new(TensorImpl raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal TensorImplPtr unsafe_adapt_non_heap_allocated( + TensorImpl raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal TensorImplPtr unsafe_reclaim_from_nonowning(TensorImpl raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplSet.java index e66e1e1b695..0e326e49954 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplSet.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplSet.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*; -@Name("std::unordered_set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Name("std::unordered_set") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class TensorImplSet extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
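The release()/reclaim() contract documented above can be exercised end to end, using unsafeReleaseIntrusivePtr() from the TensorBase hunk earlier in this patch. A minimal sketch (ones() assumed):

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ReclaimSketch {
        public static void main(String[] args) {
            TensorImplPtr p = ones(2).unsafeReleaseIntrusivePtr(); // moves ownership out

            // release() hands back an owning raw pointer and invalidates p; the
            // refcount is untouched, so the pointer must go back through reclaim().
            TensorImpl raw = p.release();
            TensorImplPtr again = TensorImplPtr.reclaim(raw);
            System.out.println("defined = " + again.defined()
                    + ", use_count = " + again.use_count());
        }
    }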
*/ @@ -27,6 +29,7 @@ public class TensorImplSet extends Pointer { public boolean empty() { return size() == 0; } public native long size(); + public TensorImpl front() { try (Iterator it = begin()) { return it.get(); } } public native void insert(TensorImpl value); public native void erase(TensorImpl value); public native @ByVal Iterator begin(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplVector.java index aff865fdd9b..c4addb61d76 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImplVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class TensorImplVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public TensorImpl front() { return get(0); } + public TensorImpl back() { return get(size() - 1); } @Index(function = "at") public native TensorImpl get(@Cast("size_t") long i); public native TensorImplVector put(@Cast("size_t") long i, TensorImpl value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java index 0a80d8f0a2c..6cd3bc9ee7a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexArrayRef.java index db7e6ba0744..33df6b775ca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class TensorIndexArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public TensorIndexArrayRef(@Const @ByRef TensorIndex OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef TensorIndex OneElt); + /** Construct an ArrayRef from a pointer and length. 
*/ public TensorIndexArrayRef(@Const TensorIndex data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -58,12 +59,12 @@ public class TensorIndexArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. - public TensorIndexArrayRef(@ByRef TensorIndexVector Vec) { super((Pointer)null); allocate(Vec); } - private native void allocate(@ByRef TensorIndexVector Vec); /** Construct an ArrayRef from a std::array */ /** Construct an ArrayRef from a C array. */ + public TensorIndexArrayRef(@ByRef TensorIndexVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef TensorIndexVector vec); /** Construct an ArrayRef from a std::initializer_list. */ /* implicit */ @@ -72,13 +73,13 @@ public class TensorIndexArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") TensorIndex begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") TensorIndex end(); + public native @Const @ByPtr TensorIndex begin(); + public native @Const @ByPtr TensorIndex end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") TensorIndex cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") TensorIndex cend(); + public native @Const @ByPtr TensorIndex cbegin(); + public native @Const @ByPtr TensorIndex cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexVector.java index c8638f09d79..2ec6d598212 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndexVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class TensorIndexVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public TensorIndex front() { return get(0); } + public TensorIndex back() { return get(size() - 1); } @Index(function = "at") public native @ByRef TensorIndex get(@Cast("size_t") long i); public native TensorIndexVector put(@Cast("size_t") long i, TensorIndex value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java new file mode 100644 index 00000000000..7819686a368 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java @@ -0,0 +1,87 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; 
+import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorIterator extends TensorIteratorBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorIterator(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public TensorIterator(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public TensorIterator position(long position) { + return (TensorIterator)super.position(position); + } + @Override public TensorIterator getPointer(long i) { + return new TensorIterator((Pointer)this).offsetAddress(i); + } + + public TensorIterator() { super((Pointer)null); allocate(); } + private native void allocate(); + // Slicing is OK, TensorIterator guaranteed NOT to have any fields + public TensorIterator(@Const @ByRef TensorIteratorBase iter) { super((Pointer)null); allocate(iter); } + private native void allocate(@Const @ByRef TensorIteratorBase iter); + +// #define TORCH_DISALLOW_TEMPORARIES(methodname) +// TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, static) + + public static native @ByVal TensorIterator binary_float_op( + @ByRef TensorBase out, + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); + public static native @ByVal TensorIterator binary_op( + @ByRef TensorBase out, + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); + public static native @ByVal TensorIterator borrowing_binary_op( + @Const @ByRef TensorBase out, + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); + public static native @ByVal TensorIterator comparison_op( + @ByRef TensorBase out, + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); + public static native @ByVal TensorIterator unary_op(@ByRef TensorBase out, @Const @ByRef TensorBase a); + public static native @ByVal TensorIterator unary_float_op(@ByRef TensorBase out, @Const @ByRef TensorBase a); + public static native @ByVal TensorIterator nullary_op(@ByRef TensorBase out); + public static native @ByVal TensorIterator borrowing_nullary_op(@Const @ByRef TensorBase out); + + public static native @ByVal TensorIterator reduce_op(@ByRef TensorBase out, @Const @ByRef TensorBase a); + public static native @ByVal TensorIterator reduce_op( + @ByRef TensorBase out1, + @ByRef TensorBase out2, + @Const @ByRef TensorBase a); +// #undef TORCH_DISALLOW_TEMPORARIES +// #undef TORCH_DISALLOW_TEMPORARIES_IMPL + + public native @Const @ByRef Tensor maybe_get_output(@Cast("int64_t") long output_idx); + public native void set_output_raw_strided( + @Cast("int64_t") long output_idx, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @ByVal TensorOptions options, + @ByVal DimnameArrayRef names); + public native void set_output_raw_strided( + @Cast("int64_t") long output_idx, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal TensorOptions options, + @ByVal DimnameArrayRef names); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java new file mode 100644 index 00000000000..66ff0985cdd --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java @@ -0,0 +1,252 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorIteratorBase extends MetaBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorIteratorBase(Pointer p) { super(p); } + + public native void build(@ByRef TensorIteratorConfig arg0); + + // The inner-loop function operates on the fastest moving dimension. It + // implements element-wise operations in terms of 1-d strided tensors. + // + // Arguments: + // data: data pointers for each operand (length `ntensors`) + // strides: stride for each operand (length `ntensors`) + // size: size of inner loop + // + // The `size` often matches shape[0], but may be smaller due to + // parallelization of the inner loop. + + + + public native int ndim(); + public native @ByVal LongArrayRef shape(); + public native @Cast("int64_t") long numel(); + public native int ntensors(); + public native int noutputs(); + public native int ninputs(); + public native @ByVal LongArrayRef view_offsets(); + + /** number of elements in the output operand. this is the same as numel() for + * operations that are not reductions. 
*/ + public native @Cast("int64_t") long num_output_elements(); + + /** number of reduced dimensions in a reduction operation */ + public native int num_reduce_dims(); + + /** 1-dimensional iteration and no buffering or type conversion */ + public native @Cast("bool") boolean is_trivial_1d(); + /** Reducible to 1-dimensional and all operands are contiguous */ + public native @Cast("bool") boolean is_contiguous(); + public native @Cast("bool") boolean is_dim_reduced(int dim); + + /** Accessors for each operand */ + public native @ByVal LongArrayRef strides(int arg); + public native Pointer data_ptr(int arg); + public native ScalarType dtype(int arg/*=0*/); + public native ScalarType dtype(); + public native ScalarType common_dtype(); + public native ScalarType input_dtype(int arg/*=0*/); + public native ScalarType input_dtype(); + public native @ByVal Device device(int arg/*=0*/); + public native @ByVal Device device(); + public native DeviceType device_type(int arg/*=0*/); + public native DeviceType device_type(); + public native @Cast("int64_t") long element_size(int arg); + public native @Cast("bool") boolean is_scalar(int arg); + public native @Cast("bool") boolean is_cpu_scalar(int arg); + + public native @Const @ByRef TensorBase tensor_base(int arg); + public native @Const @ByRef Tensor tensor(int arg); + + public native @Const @ByRef TensorBase output_base(int arg/*=0*/); + public native @Const @ByRef TensorBase output_base(); + + public native @Const @ByRef Tensor output(int arg/*=0*/); + public native @Const @ByRef Tensor output(); + + public native @Const @ByRef TensorBase input_base(int arg/*=0*/); + public native @Const @ByRef TensorBase input_base(); + public native @Const @ByRef Tensor input(int arg/*=0*/); + public native @Const @ByRef Tensor input(); + + // Copies from temporary outputs back to the original outputs + // NOTE: only used on CPU + public native void cast_outputs(); + + /** Removes an operand from this iterator */ + public native void remove_operand(int arg); + /** Shrinks an iterated dimension */ + public native void narrow(int dim, @Cast("int64_t") long start, @Cast("int64_t") long size); + /** Narrows every dim after and including {@code start_dim} to size one. */ + public native void select_all_keeping_dim(int start_dim, @ByVal LongArrayRef starts); + public native void select_all_keeping_dim(int start_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... starts); + /** Replaces the data pointer for the operand at index {@code arg}. + * The new pointer should have the same sizes, strides and dtype as the + * original */ + public native void unsafe_replace_operand(int arg, Pointer data); + + /** Splits this TensorIterator into two iterators. Together they iterate over + * the entire operation. Used by {@code with_32bit_indexing()}. */ + public native @UniquePtr TensorIterator split(int dim); + + /** Returns the dimension with the largest extent: (size[dim]-1) * stride[dim] */ + public native int get_dim_to_split(); + + + + + + + + + + + /** Create a strides array for a Tensor with shape of this iterator. The + * parameter {@code element_size} specifies the size of Tensor's data type in + * bytes (e.g. {@code 4} for {@code float}) */ + public native @ByVal @Cast("at::TensorIteratorBase::StrideVector*") SymDimVector compatible_stride(int element_size); + + /** Inverts the re-ordering done by reorder_dimensions. This can only be + * called *before* coalesce_dimensions() is called. 
*/ + public native @ByVal DimVector invert_perm(@ByVal LongArrayRef input); + public native @ByVal DimVector invert_perm(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input); + + /** Reapply same re-ordering as it is done by reorder_dimensions. This can + * only be called *before* coalesce_dimensions() is called. */ + + + /** Helper functions for CPU iteration */ + public native @ByVal @Cast("at::TensorIteratorBase::StrideVector*") SymDimVector get_dim_strides(int dim); + public native @ByVal @Cast("at::TensorIteratorBase::StrideVector*") SymDimVector get_strides(); + public native @ByVal @Cast("at::TensorIteratorBase::StrideVector*") SymDimVector get_inner_strides(); + public native @ByVal @Cast("at::TensorIteratorBase::PtrVector*") SymDimVector get_base_ptrs(); + + // Helper functions for advanced stride manipulations (e.g. torch.flip) + public native void _unsafe_set_arg_strides(int arg, @ByVal LongArrayRef strides); + public native void _unsafe_set_arg_strides(int arg, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); + public native void _unsafe_set_arg_data(int arg, Pointer data); + + /** true if the stride computation can use 32-bit arithmetic. Used by GPU + * kernels */ + public native @Cast("bool") boolean can_use_32bit_indexing(); + + /** An "iteratable" object that recursively splits this iterator into + * sub-iterators that can use 32-bit indexing. */ + public native @ByVal SplitUntil32Bit with_32bit_indexing(); + + /** If the kernel should accumulate into the output. Only relevant for CUDA + * reductions. */ + public native @Cast("bool") boolean should_accumulate(); + + /** Whether this iterator produces the actual output, + * as opposed to something that will be accumulated further. Only relevant + * for CUDA reductions. 
*/ + public native @Cast("bool") boolean is_final_output(); + + public native @Cast("bool") boolean has_contiguous_first_dim(); + + public native void set_output_raw_strided( + @Cast("int64_t") long output_idx, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @ByVal TensorOptions options, + @ByVal DimnameArrayRef names); + public native void set_output_raw_strided( + @Cast("int64_t") long output_idx, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByVal TensorOptions options, + @ByVal DimnameArrayRef names); + +// #define TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, maybestatic) +// maybestatic void methodname( +// TensorBase&& out, const TensorBase& a, const TensorBase& b) = delete; +// maybestatic void methodname( +// const TensorBase& out, TensorBase&& a, const TensorBase& b) = delete; +// maybestatic void methodname( +// const TensorBase& out, const TensorBase& a, TensorBase&& b) = delete; +// maybestatic void methodname( +// TensorBase&& out, TensorBase&& a, const TensorBase& b) = delete; +// maybestatic void methodname( +// TensorBase&& out, const TensorBase& a, TensorBase&& b) = delete; +// maybestatic void methodname( +// const TensorBase& out, TensorBase&& a, TensorBase&& b) = delete; +// maybestatic void methodname( +// TensorBase&& out, TensorBase&& a, TensorBase&& b) = delete; + +// #define TORCH_DISALLOW_TEMPORARIES(methodname) +// TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, ) + + public native void build_binary_float_op( + @Const @ByRef TensorBase out, + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); + public native void build_borrowing_binary_float_op( + @Const @ByRef TensorBase out, + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); + public native void build_binary_op( + @Const @ByRef TensorBase out, + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); + public native void build_borrowing_binary_op( + @Const @ByRef TensorBase out, + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); + public native void build_unary_float_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a); + public native void build_borrowing_unary_float_op( + @Const @ByRef TensorBase out, + @Const @ByRef TensorBase a); + public native void build_unary_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a); + // Odd special case needed for pow. Has to borrow the output because + // it's a structured kernel, but the argument is potentially a copy. + public native void build_output_borrowing_argument_owning_unary_op( + @Const @ByRef TensorBase out, + @Const @ByRef TensorBase a); + public native void build_borrowing_unary_op(@Const @ByRef TensorBase out, @Const @ByRef TensorBase a); + public native void build_borrowing_unary_force_boolean_op( + @Const @ByRef TensorBase out, + @Const @ByRef TensorBase a); + public native void build_comparison_op( + @Const @ByRef TensorBase out, + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); + public native void build_borrowing_comparison_op( + @Const @ByRef TensorBase out, + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); + // Another special case: we need to own the second argument for comparison + // ops. 
+ public native void build_borrowing_except_last_argument_comparison_op(
+ @Const @ByRef TensorBase out,
+ @Const @ByRef TensorBase a,
+ @Const @ByRef TensorBase b);
+ public native void build_ternary_op(
+ @Const @ByRef TensorBase out,
+ @Const @ByRef TensorBase a,
+ @Const @ByRef TensorBase b,
+ @Const @ByRef TensorBase c);
+
+// #undef TORCH_DISALLOW_TEMPORARIES
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java
new file mode 100644
index 00000000000..60703f9b1cc
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorConfig.java
@@ -0,0 +1,166 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("at") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TensorIteratorConfig extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public TensorIteratorConfig(Pointer p) { super(p); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public TensorIteratorConfig(long size) { super((Pointer)null); allocateArray(size); }
+ private native void allocateArray(long size);
+ @Override public TensorIteratorConfig position(long position) {
+ return (TensorIteratorConfig)super.position(position);
+ }
+ @Override public TensorIteratorConfig getPointer(long i) {
+ return new TensorIteratorConfig((Pointer)this).offsetAddress(i);
+ }
+
+
+ public TensorIteratorConfig() { super((Pointer)null); allocate(); }
+ private native void allocate();
+
+
+
+
+ /** Construction */
+ // Stores input/output Tensors without incrementing the reference count.
+ // Important: the outputs have to be added before the inputs.
+ public native @ByRef TensorIteratorConfig add_output(@Const @ByRef TensorBase output);
+ public native @ByRef TensorIteratorConfig add_input(@Const @ByRef TensorBase input);
+
+ // Borrowing from temporaries is unlikely to go well.
+
+
+
+ // Stores input/output Tensors while incrementing the reference count.
+ // Note that add_{in,out}put are nearly always what you
+ // want, and the exception (adding an unnamed temporary) won't
+ // compile.
+ public native @ByRef TensorIteratorConfig add_owned_output(@Const @ByRef TensorBase output);
+ public native @ByRef TensorIteratorConfig add_owned_input(@Const @ByRef TensorBase input);
+
+ // Advanced API: stores input/output Tensors without incrementing
+ // the reference count. The caller must ensure that these Tensors
+ // live at least as long as this TensorIteratorConfig and any
+ // TensorIteratorBase built from this TensorIteratorConfig.
+ // Important: the outputs have to be added before the inputs.
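+ // Usage sketch for the borrowing variants declared just below (assuming
+ // `out`, `a`, and `b` are TensorBase instances that outlive the iterator,
+ // as this advanced API requires):
+ //
+ //   TensorIterator iter = new TensorIteratorConfig()
+ //       .add_borrowed_output(out)
+ //       .add_borrowed_input(a)
+ //       .add_borrowed_input(b)
+ //       .build();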
+ public native @ByRef TensorIteratorConfig add_borrowed_output(@Const @ByRef TensorBase output);
+ public native @ByRef TensorIteratorConfig add_borrowed_input(@Const @ByRef TensorBase input);
+
+ // Borrowing from temporaries is unlikely to go well.
+
+
+
+ // Sets the check_mem_overlap_ flag, which is true by default.
+ // If true, inputs are checked for partial overlap with the outputs and
+ // outputs are checked for internal overlap (e.g. broadcasted views). An error
+ // is raised if unacceptable overlap is detected.
+ // If you're migrating an existing operator to using TensorIterator, please
+ // consider if the previous implementation checked memory overlap. If it did
+ // not, and if the operator is idempotent (for example, Tensor.fill_(0)), then
+ // checking memory overlap is BC-breaking. Please don't check memory overlap
+ // in that case.
+ public native @ByRef TensorIteratorConfig set_check_mem_overlap(@Cast("bool") boolean check_mem_overlap);
+
+ // Sets the check_all_same_dtype_ flag, which is true by default.
+ // If true, checks that all inputs and defined outputs have the same dtype.
+ // Setting either of promote_inputs_to_common_dtype_
+ // or cast_common_dtype_to_outputs_ to true will set
+ // check_all_same_dtype_ to false.
+ public native @ByRef TensorIteratorConfig check_all_same_dtype(@Cast("const bool") boolean _check_all_same_dtype);
+
+ // Sets the check_all_same_device_ flag, which is true by default.
+ // If true, all operands must be on the same device, with the possible
+ // exception of CPU scalars, which can be passed to some CUDA kernels
+ // as kernel arguments.
+ public native @ByRef TensorIteratorConfig check_all_same_device(
+ @Cast("const bool") boolean _check_all_same_device);
+
+ // Sets the enforce_safe_casting_to_output_ flag, which is false by default.
+ // If true, the iterator's "common dtype" must be computable
+ // (see the [Common Dtype Computation] note) and
+ // canCast(common dtype, output dtype) must be true for all outputs.
+ public native @ByRef TensorIteratorConfig enforce_safe_casting_to_output(
+ @Cast("const bool") boolean _enforce_safe_casting_to_output);
+
+ // Sets the enforce_linear_iteration_ flag, which is false by default.
+ // If true, iteration goes in the same order as a C-contiguous tensor
+ // is laid out in memory, i.e. the last dimension iterates fastest.
+ //
+ // This iteration order can be less efficient and may even prevent
+ // vectorization, so use it only if the correctness of your kernel depends on it.
+ public native @ByRef TensorIteratorConfig enforce_linear_iteration(
+ @Cast("const bool") boolean _enforce_linear_iteration/*=true*/);
+ public native @ByRef TensorIteratorConfig enforce_linear_iteration();
+
+ // Sets the promote_inputs_to_common_dtype_ flag, which is false by default.
+ // If true, the iterator's "common dtype" is always computed (see the
+ // [Common Dtype Computation] note) and, on the CPU, temporary copies of
+ // the inputs in the common dtype are passed as the actual inputs to
+ // the operation.
+ // Setting this flag to true sets check_all_same_dtype_ to false.
+ public native @ByRef TensorIteratorConfig promote_inputs_to_common_dtype(
+ @Cast("const bool") boolean _promote_inputs_to_common_dtype);
+
+ // Sets the promote_integer_inputs_to_float_ flag, which is false by default.
+ // NOTE: If set to true, the promote_inputs_to_common_dtype_ must also be
+ // true.
+ // If the iterator's "common dtype" is an integral type (including bool),
+ // it is then changed to the default float scalar type.
+ public native @ByRef TensorIteratorConfig promote_integer_inputs_to_float(
+ @Cast("const bool") boolean _promote_integer_inputs_to_float);
+
+ public native @ByRef TensorIteratorConfig is_reduction(@Cast("const bool") boolean _is_reduction);
+
+ public native @ByRef TensorIteratorConfig allow_cpu_scalars(@Cast("const bool") boolean _allow_cpu_scalars);
+
+ // Sets the cast_common_dtype_to_outputs_ flag, which is false by default.
+ // If true, the iterator's "common dtype" must be computable
+ // (see the [Common Dtype Computation] note) and, on the CPU, temporary
+ // copies of the outputs are passed as the actual output to the operation.
+ // These temporaries are then copied to the original outputs after
+ // the operation is performed (see cast_outputs()).
+ // Setting this flag to true sets check_all_same_dtype_ to false.
+ public native @ByRef TensorIteratorConfig cast_common_dtype_to_outputs(
+ @Cast("const bool") boolean _cast_common_dtype_to_outputs);
+
+ public native @ByRef TensorIteratorConfig resize_outputs(@Cast("bool") boolean resize_outputs);
+
+ // Bypass output dtype/device computation and fix the dtype/device as
+ // specified here.
+ public native @ByRef TensorIteratorConfig declare_static_dtype_and_device(
+ ScalarType dtype,
+ @ByVal Device device);
+ public native @ByRef TensorIteratorConfig declare_static_dtype(ScalarType dtype);
+ public native @ByRef TensorIteratorConfig declare_static_device(@ByVal Device device);
+ public native @ByRef TensorIteratorConfig declare_static_shape(@ByVal LongArrayRef shape);
+ public native @ByRef TensorIteratorConfig declare_static_shape(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... shape);
+ public native @ByRef TensorIteratorConfig declare_static_shape(
+ @ByVal LongArrayRef shape,
+ @ByVal LongArrayRef squash_dims);
+ public native @ByRef TensorIteratorConfig declare_static_shape(
+ @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] shape,
+ @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... squash_dims);
+
+ // It would be better if this were && qualified, but this would be at the cost
+ // of a lot of boilerplate above
+ public native @ByVal TensorIterator build();
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java
new file mode 100644
index 00000000000..5d358a23abd
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorList.java
@@ -0,0 +1,239 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("c10::List<at::Tensor>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TensorList extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public TensorList(Pointer p) { super(p); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public TensorList(long size) { super((Pointer)null); allocateArray(size); }
+ private native void allocateArray(long size);
+ @Override public TensorList position(long position) {
+ return (TensorList)super.position(position);
+ }
+ @Override public TensorList getPointer(long i) {
+ return new TensorList((Pointer)this).offsetAddress(i);
+ }
+
+
+ /**
+ * Constructs an empty list.
+ */
+ public TensorList() { super((Pointer)null); allocate(); }
+ private native void allocate();
+
+ /**
+ * Constructs a list with some initial values.
+ * Example:
+ *   List<int> a({2, 3, 4});
+ */
+ public TensorList(@ByVal TensorArrayRef initial_values) { super((Pointer)null); allocate(initial_values); }
+ private native void allocate(@ByVal TensorArrayRef initial_values);
+
+ /**
+ * Create a generic list with runtime type information.
+ * This only works for c10::impl::GenericList and is not part of the public API
+ * but only supposed to be used internally by PyTorch.
+ */
+
+
+ public TensorList(@Const @ByRef TensorList arg0) { super((Pointer)null); allocate(arg0); }
+ private native void allocate(@Const @ByRef TensorList arg0);
+ public native @ByRef @Name("operator =") TensorList put(@Const @ByRef TensorList arg0);
+
+ /**
+ * Create a new List pointing to a deep copy of the same data.
+ * The List returned is a new list with separate storage.
+ * Changes in it are not reflected in the original list or vice versa.
+ */
+ public native @ByVal TensorList copy();
+
+ /**
+ * Returns the element at specified location pos, with bounds checking.
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+ */
+ public native @ByVal Tensor get(long pos);
+
+ /**
+ * Moves out the element at the specified location pos and returns it, with bounds checking.
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+ * The list contains an invalid element at position pos afterwards. Any operations
+ * on it before re-setting it are invalid.
+ */
+ public native @ByVal Tensor extract(long pos);
+
+ /**
+ * Returns a reference to the element at specified location pos, with bounds checking.
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+ *
+ * You cannot store the reference, but you can read it and assign new values to it:
+ *
+ *   List<int64_t> list = ...;
+ *   list[2] = 5;
+ *   int64_t v = list[1];
+ */
+
+
+
+
+ /**
+ * Assigns a new value to the element at location pos.
+ */
+ public native void set(long pos, @ByVal Tensor value);
+
+ /**
+ * Assigns a new value to the element at location pos.
+ */
+
+ /**
+ * Returns an iterator to the first element of the container.
+ * If the container is empty, the returned iterator will be equal to end().
+ */
+ public native @ByVal @Cast("c10::List<at::Tensor>::iterator*") TensorListIterator begin();
+
+ /**
+ * Returns an iterator to the element following the last element of the container.
+ * This element acts as a placeholder; attempting to access it results in undefined behavior.
+ */
+ public native @ByVal @Cast("c10::List<at::Tensor>::iterator*") TensorListIterator end();
+
+ /**
+ * Checks if the container has no elements.
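+ * A minimal usage sketch (assuming Tensor's no-argument constructor, which
+ * creates an undefined tensor):
+ *   TensorList list = new TensorList();
+ *   boolean e = list.empty();        // true for a freshly constructed list
+ *   list.push_back(new Tensor());    // append a default-constructed tensor
+ *   e = list.empty();                // now false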
+ */ + public native @Cast("bool") boolean empty(); + + /** + * Returns the number of elements in the container + */ + public native long size(); + + /** + * Increase the capacity of the vector to a value that's greater or equal to new_cap. + */ + public native void reserve(long new_cap); + + /** + * Erases all elements from the container. After this call, size() returns zero. + * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated. + */ + public native void clear(); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") TensorListIterator insert(@ByVal @Cast("c10::List::iterator*") TensorListIterator pos, @Const @ByRef Tensor value); + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Inserts a new element into the container directly before pos. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void push_back(@Const @ByRef Tensor value); + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Appends the given list to the end of the container. Uses at most one memory allocation. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void append(@ByVal TensorList lst); + + /** + * Appends the given element value to the end of the container. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + + /** + * Removes the element at pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") TensorListIterator erase(@ByVal @Cast("c10::List::iterator*") TensorListIterator pos); + + /** + * Removes the elements in the range [first, last). + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native @ByVal @Cast("c10::List::iterator*") TensorListIterator erase(@ByVal @Cast("c10::List::iterator*") TensorListIterator first, @ByVal @Cast("c10::List::iterator*") TensorListIterator last); + + /** + * Removes the last element of the container. + * Calling pop_back on an empty container is undefined. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + public native void pop_back(); + + /** + * Resizes the container to contain count elements. 
+ * If the current size is less than count, additional default-inserted elements are appended.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ public native void resize(long count);
+
+ /**
+ * Resizes the container to contain count elements.
+ * If the current size is less than count, additional copies of value are appended.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ public native void resize(long count, @Const @ByRef Tensor value);
+
+ /**
+ * Value equality comparison. This function implements Python-like semantics for
+ * equality: two lists with the same identity (e.g. same pointer) trivially
+ * compare equal, otherwise each element is compared for equality.
+ */
+
+
+
+
+ /**
+ * Identity comparison. Returns true if and only if {@code rhs} represents the same
+ * List object as {@code this}.
+ */
+ public native @Cast("bool") boolean is(@Const @ByRef TensorList rhs);
+
+ public native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector vec();
+
+ /**
+ * Returns the number of Lists currently pointing to this same list.
+ * If this is the only instance pointing to this list, returns 1.
+ */
+ // TODO Test use_count
+ public native @Cast("size_t") long use_count();
+
+ public native @ByVal Type.TypePtr elementType();
+
+ // See [unsafe set type] for why this exists.
+ public native void unsafeSetElementType(@ByVal Type.TypePtr t);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorListIterator.java
new file mode 100644
index 00000000000..d458185bd54
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorListIterator.java
@@ -0,0 +1,84 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("c10::impl::ListIterator<at::Tensor,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TensorListIterator extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public TensorListIterator(Pointer p) { super(p); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public TensorListIterator(long size) { super((Pointer)null); allocateArray(size); }
+ private native void allocateArray(long size);
+ @Override public TensorListIterator position(long position) {
+ return (TensorListIterator)super.position(position);
+ }
+ @Override public TensorListIterator getPointer(long i) {
+ return new TensorListIterator((Pointer)this).offsetAddress(i);
+ }
+
+ // C++17 friendly std::iterator implementation
+
+ public TensorListIterator() { super((Pointer)null); allocate(); }
+ private native void allocate();
+
+ public TensorListIterator(@Const @ByRef TensorListIterator arg0) { super((Pointer)null); allocate(arg0); }
+ private native void allocate(@Const @ByRef TensorListIterator arg0);
+ public native @ByRef @Name("operator =") TensorListIterator put(@Const @ByRef TensorListIterator arg0);
+
+ public native @ByRef @Name("operator ++") TensorListIterator increment();
+
+ public native @ByVal @Name("operator ++") TensorListIterator increment(int arg0);
+
+ public native @ByRef @Name("operator --") TensorListIterator decrement();
+
+ public native @ByVal @Name("operator --") TensorListIterator decrement(int arg0);
+
+ public native @ByRef @Name("operator +=") TensorListIterator addPut(long offset);
+
+ public native @ByRef @Name("operator -=") TensorListIterator subtractPut(long offset);
+
+ public native @ByVal @Name("operator +") TensorListIterator add(long offset);
+
+ public native @ByVal @Name("operator -") TensorListIterator subtract(long offset);
+
+ private static native @Namespace @Cast("c10::impl::ListIterator<at::Tensor,c10::detail::ListImpl::list_type::iterator>::difference_type") @Name("operator -") long subtract(@Const @ByRef TensorListIterator lhs, @Const @ByRef TensorListIterator rhs);
+ public long subtract(TensorListIterator rhs) { return subtract(this, rhs); }
+
+
+
+
+
+ private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TensorListIterator lhs, @Const @ByRef TensorListIterator rhs);
+ public boolean equals(TensorListIterator rhs) { return equals(this, rhs); }
+
+ private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TensorListIterator lhs, @Const @ByRef TensorListIterator rhs);
+ public boolean notEquals(TensorListIterator rhs) { return notEquals(this, rhs); }
+
+ private static native @Namespace @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef TensorListIterator lhs, @Const @ByRef TensorListIterator rhs);
+ public boolean lessThan(TensorListIterator rhs) { return lessThan(this, rhs); }
+
+ private static native @Namespace @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef TensorListIterator lhs, @Const @ByRef TensorListIterator rhs);
+ public boolean lessThanEquals(TensorListIterator rhs) { return lessThanEquals(this, rhs); }
+
+ private static native @Namespace @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef TensorListIterator lhs, @Const @ByRef TensorListIterator rhs);
+ public boolean greaterThan(TensorListIterator rhs) { return greaterThan(this, rhs); }
+
+ private static native @Namespace @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef TensorListIterator lhs, @Const @ByRef TensorListIterator rhs);
+ public boolean greaterThanEquals(TensorListIterator rhs) { return greaterThanEquals(this, rhs); }
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorListOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorListOptional.java
deleted file mode 100644
index 6e48ad036d1..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorListOptional.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@NoOffset @Name("c10::optional<at::TensorList>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TensorListOptional extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public TensorListOptional(Pointer p) { super(p); }
- public TensorListOptional(TensorArrayRef value) { this(); put(value); }
- public TensorListOptional() { allocate(); }
- private native void allocate();
- public native @Name("operator =") @ByRef TensorListOptional put(@ByRef TensorListOptional x);
-
- public native boolean has_value();
- public native @Name("value") @ByRef TensorArrayRef get();
- @ValueSetter public native TensorListOptional put(@ByRef TensorArrayRef value);
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java
index 3eacb3699fe..e005e974334 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaker.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -39,11 +41,11 @@ public class TensorMaker extends Pointer {
 
 public native @ByRef @NoException(true) TensorMaker storage_offset(@ByVal LongOptional value);
 
- public native @ByRef @NoException(true) TensorMaker deleter(@ByVal Deleter value);
+ public native @ByRef @NoException(true) TensorMaker deleter(@ByVal PointerConsumer value);
 public native @ByRef @NoException(true) TensorMaker deleter(@ByVal @Cast("void(*)(void*)") Pointer value);
 public native @ByRef @NoException(true) TensorMaker deleter(@ByVal @Cast("void(*)(void*)") long value);
 
- public native @ByRef @NoException(true) TensorMaker context(Pointer value, @Cast("at::TensorMaker::ContextDeleter") Deleter deleter/*=nullptr*/);
+ public native @ByRef @NoException(true) TensorMaker context(Pointer value, @Cast("at::TensorMaker::ContextDeleter") PointerConsumer deleter/*=nullptr*/);
 public native @ByRef @NoException(true) TensorMaker context(Pointer value);
 public native @ByRef @NoException(true) TensorMaker context(Pointer value, @Cast("at::TensorMaker::ContextDeleter") Pointer deleter/*=nullptr*/);
 public native @ByRef @NoException(true) TensorMaker context(Pointer value, @Cast("at::TensorMaker::ContextDeleter") long deleter/*=nullptr*/);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java
index a4e104d8556..9bb5e266ecd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorMaybeOwned.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java
index d80207ccf0f..bc5149b11e6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorName.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -50,4 +52,9 @@ public class TensorName extends Pointer {
 public native @Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName unify(@Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName other, @Cast("const char*") BytePointer op_name);
 public native @Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName unify(@Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName other, String op_name);
 public native @ByVal Dimname toDimname();
+
+ private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(
+ @Cast("std::ostream*") @ByRef Pointer out,
+ @Cast({"", "at::namedinference::TensorName&&"}) @StdMove TensorName tensorname);
+ public Pointer shiftLeft(Pointer out) { return shiftLeft(out, this); }
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java
index affe81f2c5d..227c8a6c60d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorNames.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptional.java
index 384b228439a..52c9832749d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptional.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -26,6 +28,7 @@ public class TensorOptional extends Pointer {
 public native @Name("operator =") @ByRef TensorOptional put(@ByRef TensorOptional x);
 
 public native boolean has_value();
+ public native void reset();
 public native @Name("value") @ByRef Tensor get();
 @ValueSetter public native TensorOptional put(@ByRef Tensor value);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalArrayRef.java
index c5a305206eb..f84cd133ed7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalArrayRef.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -39,8 +41,7 @@ public class TensorOptionalArrayRef extends Pointer {
 
 /** Construct an ArrayRef from a single element. */
 // TODO Make this explicit
- public TensorOptionalArrayRef(@Const @ByRef TensorOptional OneElt) { super((Pointer)null); allocate(OneElt); }
- private native void allocate(@Const @ByRef TensorOptional OneElt);
+
 
 /** Construct an ArrayRef from a pointer and length. */
 public TensorOptionalArrayRef(@Const TensorOptional data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); }
@@ -70,13 +71,13 @@ public class TensorOptionalArrayRef extends Pointer {
 * \name Simple Operations
 * \{
 */
- public native @ByVal @Cast("const c10::ArrayRef<c10::optional<at::Tensor> >::iterator*") TensorOptional begin();
- public native @ByVal @Cast("const c10::ArrayRef<c10::optional<at::Tensor> >::iterator*") TensorOptional end();
+ public native @Const @ByPtr TensorOptional begin();
+ public native @Const @ByPtr TensorOptional end();
 
 // These are actually the same as iterator, since ArrayRef only
 // gives you const iterators.
- public native @ByVal @Cast("const c10::ArrayRef<c10::optional<at::Tensor> >::const_iterator*") TensorOptional cbegin();
- public native @ByVal @Cast("const c10::ArrayRef<c10::optional<at::Tensor> >::const_iterator*") TensorOptional cend();
+ public native @Const @ByPtr TensorOptional cbegin();
+ public native @Const @ByPtr TensorOptional cend();
 
 /** empty - Check if the array is empty.
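 * A sketch of element access through the raw pointers returned by the new
 * begin()/end() accessors above (assuming the class also exposes size(), as
 * the other ArrayRef bindings do, and given some existing
 * TensorOptionalArrayRef `ref`):
 *   TensorOptional it = ref.begin();
 *   for (long i = 0; i < ref.size(); i++) {
 *       it.position(i);                    // step to the i-th element
 *       boolean present = it.has_value();
 *   }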
 */
 public native @Cast("const bool") boolean empty();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalElementReference.java
new file mode 100644
index 00000000000..25cf4f9985f
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalElementReference.java
@@ -0,0 +1,42 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Name("c10::impl::ListElementReference<c10::optional<at::Tensor>,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TensorOptionalElementReference extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public TensorOptionalElementReference(Pointer p) { super(p); }
+
+ public native @Name("operator std::conditional_t<std::is_reference<typename c10::detail::ivalue_to_const_ref_overload_return<c10::optional<at::Tensor> >::type>::value,const c10::optional<at::Tensor>&,c10::optional<at::Tensor> >") @ByVal TensorOptional getTensorOptional();
+
+
+
+
+
+ // assigning another ref to this assigns the underlying value
+
+
+ public native @Const @ByRef IValue get();
+
+
+
+
+
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalList.java
new file mode 100644
index 00000000000..50b5e98badf
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalList.java
@@ -0,0 +1,239 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("c10::List<c10::optional<at::Tensor> >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TensorOptionalList extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public TensorOptionalList(Pointer p) { super(p); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public TensorOptionalList(long size) { super((Pointer)null); allocateArray(size); }
+ private native void allocateArray(long size);
+ @Override public TensorOptionalList position(long position) {
+ return (TensorOptionalList)super.position(position);
+ }
+ @Override public TensorOptionalList getPointer(long i) {
+ return new TensorOptionalList((Pointer)this).offsetAddress(i);
+ }
+
+
+ /**
+ * Constructs an empty list.
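+ * A minimal sketch (assuming TensorOptional's single-argument constructor
+ * wraps a value, mirroring the other Optional bindings):
+ *   TensorOptionalList list = new TensorOptionalList();
+ *   list.push_back(new TensorOptional());               // an empty slot
+ *   list.push_back(new TensorOptional(new Tensor()));   // a present tensor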
+ */
+ public TensorOptionalList() { super((Pointer)null); allocate(); }
+ private native void allocate();
+
+ /**
+ * Constructs a list with some initial values.
+ * Example:
+ *   List<int> a({2, 3, 4});
+ */
+ public TensorOptionalList(@ByVal TensorOptionalArrayRef initial_values) { super((Pointer)null); allocate(initial_values); }
+ private native void allocate(@ByVal TensorOptionalArrayRef initial_values);
+
+ /**
+ * Create a generic list with runtime type information.
+ * This only works for c10::impl::GenericList and is not part of the public API
+ * but only supposed to be used internally by PyTorch.
+ */
+
+
+ public TensorOptionalList(@Const @ByRef TensorOptionalList arg0) { super((Pointer)null); allocate(arg0); }
+ private native void allocate(@Const @ByRef TensorOptionalList arg0);
+ public native @ByRef @Name("operator =") TensorOptionalList put(@Const @ByRef TensorOptionalList arg0);
+
+ /**
+ * Create a new List pointing to a deep copy of the same data.
+ * The List returned is a new list with separate storage.
+ * Changes in it are not reflected in the original list or vice versa.
+ */
+ public native @ByVal TensorOptionalList copy();
+
+ /**
+ * Returns the element at specified location pos, with bounds checking.
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+ */
+ public native @ByVal TensorOptional get(long pos);
+
+ /**
+ * Moves out the element at the specified location pos and returns it, with bounds checking.
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+ * The list contains an invalid element at position pos afterwards. Any operations
+ * on it before re-setting it are invalid.
+ */
+ public native @ByVal TensorOptional extract(long pos);
+
+ /**
+ * Returns a reference to the element at specified location pos, with bounds checking.
+ * If pos is not within the range of the container, an exception of type std::out_of_range is thrown.
+ *
+ * You cannot store the reference, but you can read it and assign new values to it:
+ *
+ *   List<int64_t> list = ...;
+ *   list[2] = 5;
+ *   int64_t v = list[1];
+ */
+
+
+
+
+ /**
+ * Assigns a new value to the element at location pos.
+ */
+ public native void set(long pos, @ByVal TensorOptional value);
+
+ /**
+ * Assigns a new value to the element at location pos.
+ */
+
+ /**
+ * Returns an iterator to the first element of the container.
+ * If the container is empty, the returned iterator will be equal to end().
+ */
+ public native @ByVal @Cast("c10::List<c10::optional<at::Tensor> >::iterator*") TensorOptionalListIterator begin();
+
+ /**
+ * Returns an iterator to the element following the last element of the container.
+ * This element acts as a placeholder; attempting to access it results in undefined behavior.
+ */
+ public native @ByVal @Cast("c10::List<c10::optional<at::Tensor> >::iterator*") TensorOptionalListIterator end();
+
+ /**
+ * Checks if the container has no elements.
+ */
+ public native @Cast("bool") boolean empty();
+
+ /**
+ * Returns the number of elements in the container.
+ */
+ public native long size();
+
+ /**
+ * Increase the capacity of the vector to a value that's greater than or equal to new_cap.
+ */
+ public native void reserve(long new_cap);
+
+ /**
+ * Erases all elements from the container. After this call, size() returns zero.
+ * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated.
+ */
+ public native void clear();
+
+ /**
+ * Inserts value before pos.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ public native @ByVal @Cast("c10::List<c10::optional<at::Tensor> >::iterator*") TensorOptionalListIterator insert(@ByVal @Cast("c10::List<c10::optional<at::Tensor> >::iterator*") TensorOptionalListIterator pos, @Const @ByRef TensorOptional value);
+
+ /**
+ * Inserts value before pos.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+
+ /**
+ * Inserts a new element into the container directly before pos.
+ * The new element is constructed with the given arguments.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+
+ /**
+ * Appends the given element value to the end of the container.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ public native void push_back(@Const @ByRef TensorOptional value);
+
+ /**
+ * Appends the given element value to the end of the container.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+
+ /**
+ * Appends the given list to the end of the container. Uses at most one memory allocation.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ public native void append(@ByVal TensorOptionalList lst);
+
+ /**
+ * Appends the given element value to the end of the container.
+ * The new element is constructed with the given arguments.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+
+ /**
+ * Removes the element at pos.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ public native @ByVal @Cast("c10::List<c10::optional<at::Tensor> >::iterator*") TensorOptionalListIterator erase(@ByVal @Cast("c10::List<c10::optional<at::Tensor> >::iterator*") TensorOptionalListIterator pos);
+
+ /**
+ * Removes the elements in the range [first, last).
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ public native @ByVal @Cast("c10::List<c10::optional<at::Tensor> >::iterator*") TensorOptionalListIterator erase(@ByVal @Cast("c10::List<c10::optional<at::Tensor> >::iterator*") TensorOptionalListIterator first, @ByVal @Cast("c10::List<c10::optional<at::Tensor> >::iterator*") TensorOptionalListIterator last);
+
+ /**
+ * Removes the last element of the container.
+ * Calling pop_back on an empty container is undefined.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ public native void pop_back();
+
+ /**
+ * Resizes the container to contain count elements.
+ * If the current size is less than count, additional default-inserted elements are appended.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ public native void resize(long count);
+
+ /**
+ * Resizes the container to contain count elements.
+ * If the current size is less than count, additional copies of value are appended.
+ * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated.
+ */
+ public native void resize(long count, @Const @ByRef TensorOptional value);
+
+ /**
+ * Value equality comparison. This function implements Python-like semantics for
+ * equality: two lists with the same identity (e.g. same pointer) trivially
+ * compare equal, otherwise each element is compared for equality.
+ */
+
+
+
+
+ /**
+ * Identity comparison. Returns true if and only if {@code rhs} represents the same
+ * List object as {@code this}.
+ */
+ public native @Cast("bool") boolean is(@Const @ByRef TensorOptionalList rhs);
+
+ public native @StdVector TensorOptional vec();
+
+ /**
+ * Returns the number of Lists currently pointing to this same list.
+ * If this is the only instance pointing to this list, returns 1.
+ */
+ // TODO Test use_count
+ public native @Cast("size_t") long use_count();
+
+ public native @ByVal Type.TypePtr elementType();
+
+ // See [unsafe set type] for why this exists.
+ public native void unsafeSetElementType(@ByVal Type.TypePtr t);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalListIterator.java
new file mode 100644
index 00000000000..dfa4f75d507
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalListIterator.java
@@ -0,0 +1,84 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("c10::impl::ListIterator<c10::optional<at::Tensor>,c10::detail::ListImpl::list_type::iterator>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TensorOptionalListIterator extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public TensorOptionalListIterator(Pointer p) { super(p); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public TensorOptionalListIterator(long size) { super((Pointer)null); allocateArray(size); }
+ private native void allocateArray(long size);
+ @Override public TensorOptionalListIterator position(long position) {
+ return (TensorOptionalListIterator)super.position(position);
+ }
+ @Override public TensorOptionalListIterator getPointer(long i) {
+ return new TensorOptionalListIterator((Pointer)this).offsetAddress(i);
+ }
+
+ // C++17 friendly std::iterator implementation
+
+ public TensorOptionalListIterator() { super((Pointer)null); allocate(); }
+ private native void allocate();
+
+ public TensorOptionalListIterator(@Const @ByRef TensorOptionalListIterator arg0) { super((Pointer)null); allocate(arg0); }
+ private native void allocate(@Const @ByRef TensorOptionalListIterator arg0);
+ public native @ByRef @Name("operator =") TensorOptionalListIterator put(@Const @ByRef TensorOptionalListIterator arg0);
+
+ public native @ByRef @Name("operator ++") TensorOptionalListIterator increment();
+
+ public native @ByVal @Name("operator ++") TensorOptionalListIterator increment(int arg0);
+
+ public native @ByRef @Name("operator --") TensorOptionalListIterator decrement();
+
+ public native @ByVal @Name("operator --") TensorOptionalListIterator decrement(int arg0);
+
+ public native @ByRef @Name("operator +=") TensorOptionalListIterator addPut(long offset);
+
+ public native @ByRef @Name("operator -=") TensorOptionalListIterator subtractPut(long offset);
+
+ public native @ByVal @Name("operator +") TensorOptionalListIterator add(long offset);
+
+ public native @ByVal @Name("operator -") TensorOptionalListIterator subtract(long offset);
+
+ private static native @Namespace @Cast("c10::impl::ListIterator<c10::optional<at::Tensor>,c10::detail::ListImpl::list_type::iterator>::difference_type") @Name("operator -") long subtract(@Const @ByRef TensorOptionalListIterator lhs, @Const @ByRef TensorOptionalListIterator rhs);
+ public long subtract(TensorOptionalListIterator rhs) { return subtract(this, rhs); }
+
+
+
+
+
+ private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TensorOptionalListIterator lhs, @Const @ByRef TensorOptionalListIterator rhs);
+ public boolean equals(TensorOptionalListIterator rhs) { return equals(this, rhs); }
+
+ private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TensorOptionalListIterator lhs, @Const @ByRef TensorOptionalListIterator rhs);
+ public boolean notEquals(TensorOptionalListIterator rhs) { return notEquals(this, rhs); }
+
+ private static native @Namespace @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef TensorOptionalListIterator lhs, @Const @ByRef TensorOptionalListIterator rhs);
+ public boolean lessThan(TensorOptionalListIterator rhs) { return lessThan(this, rhs); }
+
+ private static native @Namespace @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Const @ByRef TensorOptionalListIterator lhs, @Const @ByRef TensorOptionalListIterator rhs);
+ public boolean lessThanEquals(TensorOptionalListIterator rhs) { return lessThanEquals(this, rhs); }
+
+ private static native @Namespace @Cast("bool") @Name("operator >") boolean greaterThan(@Const @ByRef TensorOptionalListIterator lhs, @Const @ByRef TensorOptionalListIterator rhs);
+ public boolean greaterThan(TensorOptionalListIterator rhs) { return greaterThan(this, rhs); }
+ private static native @Namespace @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Const @ByRef TensorOptionalListIterator lhs, @Const @ByRef TensorOptionalListIterator rhs);
+ public boolean greaterThanEquals(TensorOptionalListIterator rhs) { return greaterThanEquals(this, rhs); }
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalVector.java
index 330dabe8061..e6ec6109f27 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptionalVector.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -33,6 +35,8 @@ public class TensorOptionalVector extends Pointer {
 public void clear() { resize(0); }
 public native void resize(@Cast("size_t") long n);
 
+ public TensorOptional front() { return get(0); }
+ public TensorOptional back() { return get(size() - 1); }
 @Index(function = "at") public native @ByRef TensorOptional get(@Cast("size_t") long i);
 public native TensorOptionalVector put(@Cast("size_t") long i, TensorOptional value);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java
index a999f7a46bf..8ca34de54a6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorOptional.java
deleted file mode 100644
index 7b7d1666bad..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorOptional.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@NoOffset @Name("torch::optional<std::tuple<at::Tensor,at::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TensorTensorOptional extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public TensorTensorOptional(Pointer p) { super(p); }
- public TensorTensorOptional(TensorTensorTuple value) { this(); put(value); }
- public TensorTensorOptional() { allocate(); }
- private native void allocate();
- public native @Name("operator =") @ByRef TensorTensorOptional put(@ByRef TensorTensorOptional x);
-
- public native boolean has_value();
- public native @Name("value") @ByRef TensorTensorTuple get();
- @ValueSetter public native TensorTensorOptional put(@ByRef TensorTensorTuple value);
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorVectorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorVectorTuple.java
deleted file mode 100644
index 096ad9a9d71..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTensorVectorTuple.java
+++ /dev/null
@@ -1,38 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@NoOffset @Name("std::tuple<at::Tensor,at::Tensor,at::Tensor,std::vector<at::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TensorTensorTensorTensorVectorTuple extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public TensorTensorTensorTensorVectorTuple(Pointer p) { super(p); }
- public TensorTensorTensorTensorVectorTuple(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value3) { allocate(value0, value1, value2, value3); }
- private native void allocate(@ByRef Tensor value0, @ByRef Tensor value1, @ByRef Tensor value2, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value3);
- public TensorTensorTensorTensorVectorTuple() { allocate(); }
- private native void allocate();
- public native @Name("operator =") @ByRef TensorTensorTensorTensorVectorTuple put(@ByRef TensorTensorTensorTensorVectorTuple x);
-
- public @ByRef Tensor get0() { return get0(this); }
- @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorTensorTensorVectorTuple container);
- public @ByRef Tensor get1() { return get1(this); }
- @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef TensorTensorTensorTensorVectorTuple container);
- public @ByRef Tensor get2() { return get2(this); }
- @Namespace @Name("std::get<2>") public static native @ByRef Tensor get2(@ByRef TensorTensorTensorTensorVectorTuple container);
- public @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get3() { return get3(this); }
- @Namespace @Name("std::get<3>") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get3(@ByRef TensorTensorTensorTensorVectorTuple container);
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTupleTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTupleTuple.java
deleted file mode 100644
index f8f91509420..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorTensorTupleTuple.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@NoOffset @Name("std::tuple<at::Tensor,std::tuple<at::Tensor,at::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TensorTensorTensorTupleTuple extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public TensorTensorTensorTupleTuple(Pointer p) { super(p); }
- public TensorTensorTensorTupleTuple(@ByRef Tensor value0, @ByRef TensorTensorTuple value1) { allocate(value0, value1); }
- private native void allocate(@ByRef Tensor value0, @ByRef TensorTensorTuple value1);
- public TensorTensorTensorTupleTuple() { allocate(); }
- private native void allocate();
- public native @Name("operator =") @ByRef TensorTensorTensorTupleTuple put(@ByRef TensorTensorTensorTupleTuple x);
-
- public @ByRef Tensor get0() { return get0(this); }
- @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorTensorTupleTuple container);
- public @ByRef TensorTensorTuple get1() { return get1(this); }
- @Namespace @Name("std::get<1>") public static native @ByRef TensorTensorTuple get1(@ByRef TensorTensorTensorTupleTuple container);
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorVectorTensorVectorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorVectorTensorVectorTuple.java
deleted file mode 100644
index 81582bf89cd..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorVectorTensorVectorTuple.java
+++ /dev/null
@@ -1,36 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@NoOffset @Name("std::tuple<at::Tensor,std::vector<at::Tensor>,std::vector<at::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TensorTensorVectorTensorVectorTuple extends Pointer {
- static { Loader.load(); }
-    public TensorTensorVectorTensorVectorTuple(Pointer p) { super(p); }
-    public TensorTensorVectorTensorVectorTuple(@ByRef Tensor value0, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value1, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value2) { allocate(value0, value1, value2); }
-    private native void allocate(@ByRef Tensor value0, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value1, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value2);
-    public TensorTensorVectorTensorVectorTuple() { allocate(); }
-    private native void allocate();
-    public native @Name("operator =") @ByRef TensorTensorVectorTensorVectorTuple put(@ByRef TensorTensorVectorTensorVectorTuple x);
-
-    public @ByRef Tensor get0() { return get0(this); }
-    @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorVectorTensorVectorTuple container);
-    public @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get1() { return get1(this); }
-    @Namespace @Name("std::get<1>") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get1(@ByRef TensorTensorVectorTensorVectorTuple container);
-    public @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get2() { return get2(this); }
-    @Namespace @Name("std::get<2>") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get2(@ByRef TensorTensorVectorTensorVectorTuple container);
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorVectorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorVectorTuple.java
deleted file mode 100644
index 9214ad695fc..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorVectorTuple.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@NoOffset @Name("std::tuple<at::Tensor,std::vector<at::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TensorTensorVectorTuple extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public TensorTensorVectorTuple(Pointer p) { super(p); }
-    public TensorTensorVectorTuple(@ByRef Tensor value0, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value1) { allocate(value0, value1); }
-    private native void allocate(@ByRef Tensor value0, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value1);
-    public TensorTensorVectorTuple() { allocate(); }
-    private native void allocate();
-    public native @Name("operator =") @ByRef TensorTensorVectorTuple put(@ByRef TensorTensorVectorTuple x);
-
-    public @ByRef Tensor get0() { return get0(this); }
-    @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTensorVectorTuple container);
-    public @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get1() { return get1(this); }
-    @Namespace @Name("std::get<1>") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get1(@ByRef TensorTensorVectorTuple container);
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTuple.java
deleted file mode 100644
index 4f0ec47a5d9..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTuple.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@NoOffset @Name("std::tuple<at::Tensor>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TensorTuple extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public TensorTuple(Pointer p) { super(p); }
-    public TensorTuple(@ByRef Tensor value0) { allocate(value0); }
-    private native void allocate(@ByRef Tensor value0);
-    public TensorTuple() { allocate(); }
-    private native void allocate();
-    public native @Name("operator =") @ByRef TensorTuple put(@ByRef TensorTuple x);
-
-    public @ByRef Tensor get0() { return get0(this); }
-    @Namespace @Name("std::get<0>") public static native @ByRef Tensor get0(@ByRef TensorTuple container);
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java
index 5e4220b3acf..31d67e8f65e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorType.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -22,11 +24,11 @@ public class TensorType extends SharedType {
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public TensorType(Pointer p) { super(p); }
 
-    public static native @SharedPtr @ByVal TensorType create(@Const @ByRef Tensor t);
+    public static native @SharedPtr("c10::TensorType") @ByVal TensorType create(@Const @ByRef Tensor t);
 
     // used by TensorType::create(size_t dim) which in turn used by
     // shape_analysis.cpp
-    public static native @SharedPtr @ByVal TensorType create(
+    public static native @SharedPtr("c10::TensorType") @ByVal TensorType create(
         @ByVal ScalarTypeOptional scalar_type,
         @ByVal DeviceOptional device,
         @Const @ByRef LongVaryingShape sizes,
@@ -34,28 +36,28 @@ public class TensorType extends SharedType {
         @ByVal BoolOptional requires_grad,
         @ByVal(nullValue = "c10::optional<bool>(false)") BoolOptional undefined,
         @Cast("bool") boolean tensor_contiguity/*=false*/);
-    public static native @SharedPtr @ByVal TensorType create(
+    public static native @SharedPtr("c10::TensorType") @ByVal TensorType create(
         @ByVal ScalarTypeOptional scalar_type,
         @ByVal DeviceOptional device,
         @Const @ByRef LongVaryingShape sizes,
         @Const @ByRef LongVaryingShape strides,
         @ByVal BoolOptional requires_grad);
-    public static native @SharedPtr @ByVal TensorType create(
+    public static native @SharedPtr("c10::TensorType") @ByVal TensorType create(
         @ByVal ScalarTypeOptional scalar_type,
         @ByVal DeviceOptional device,
         @Const @ByRef SymbolicShape sizes,
         @Const @ByRef StrideVaryingShape stride_,
         @ByVal BoolOptional requires_grad,
         @ByVal(nullValue = "c10::optional<bool>(false)") BoolOptional undefined);
-    public static native @SharedPtr @ByVal TensorType create(
+    public static native @SharedPtr("c10::TensorType") @ByVal TensorType create(
         @ByVal ScalarTypeOptional scalar_type,
         @ByVal DeviceOptional device,
         @Const @ByRef SymbolicShape sizes,
         @Const @ByRef StrideVaryingShape stride_,
         @ByVal BoolOptional requires_grad);
-    public static native @SharedPtr @ByVal TensorType create(
+    public static native @SharedPtr("c10::TensorType") @ByVal TensorType create(
         @ByVal ScalarTypeOptional scalar_type,
         @ByVal DeviceOptional device,
         @ByVal SizeTOptional dim,
@@ -63,11 +65,11 @@
 
     // overloaded create variadic template argument as it could not distinguish
     // initializer list
-    public static native @SharedPtr @ByVal TensorType createContiguous(
+    public static native @SharedPtr("c10::TensorType") @ByVal TensorType createContiguous(
         ScalarType scalar_type,
         @ByVal Device device,
-        @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef sizes);
+        @ByVal LongArrayRef sizes);
-    public static native @SharedPtr @ByVal TensorType createContiguous(
+    public static native @SharedPtr("c10::TensorType") @ByVal TensorType createContiguous(
         ScalarType scalar_type,
         @ByVal Device device,
         @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... sizes);
@@ -97,36 +99,36 @@ public class TensorType extends SharedType {
 
     public native @ByVal SizeTOptional numel();
 
-    public native @SharedPtr @ByVal TensorType withRequiresGrad(@ByVal BoolOptional s);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withRequiresGrad(@ByVal BoolOptional s);
 
-    public native @SharedPtr @ByVal TensorType withScalarType(@ByVal ScalarTypeOptional st);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withScalarType(@ByVal ScalarTypeOptional st);
 
-    public native @SharedPtr @ByVal TensorType withDim(@ByVal SizeTOptional d);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withDim(@ByVal SizeTOptional d);
 
-    public native @SharedPtr @ByVal TensorType withStrides(@ByVal StrideVaryingShape sstrides);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withStrides(@ByVal StrideVaryingShape sstrides);
 
-    public native @SharedPtr @ByVal TensorType withSizesStrides(
-        @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef sizes,
-        @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef strides);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withSizesStrides(
+        @ByVal LongArrayRef sizes,
+        @ByVal LongArrayRef strides);
-    public native @SharedPtr @ByVal TensorType withSizesStrides(
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withSizesStrides(
         @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] sizes,
         @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... strides);
 
-    public native @SharedPtr @ByVal TensorType withSymbolicShapes(@ByVal SymbolicShape ssizes);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withSymbolicShapes(@ByVal SymbolicShape ssizes);
 
-    public native @SharedPtr @ByVal TensorType withSizes(@ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef sizes);
-    public native @SharedPtr @ByVal TensorType withSizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... sizes);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withSizes(@ByVal LongArrayRef sizes);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withSizes(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... sizes);
 
-    public native @SharedPtr @ByVal TensorType withDevice(@Const @ByVal DeviceOptional device);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withDevice(@Const @ByVal DeviceOptional device);
 
-    public native @SharedPtr @ByVal TensorType dimensionedOnly();
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType dimensionedOnly();
 
-    public native @SharedPtr @ByVal TensorType contiguous();
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType contiguous();
 
     public native @Const @ByRef SymbolicShape symbolic_sizes();
 
-    public native @SharedPtr @ByVal TensorType merge(@Const @ByRef TensorType other, @Cast("bool") boolean merge_sizes/*=true*/);
-    public native @SharedPtr @ByVal TensorType merge(@Const @ByRef TensorType other);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType merge(@Const @ByRef TensorType other, @Cast("bool") boolean merge_sizes/*=true*/);
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType merge(@Const @ByRef TensorType other);
 
     public native @Cast("bool") boolean matchTensor(@Const @ByRef Tensor t);
 
@@ -138,27 +140,27 @@ public class TensorType extends SharedType {
 
     public native @Cast("bool") boolean isInferredType();
 
-    public static native @SharedPtr @ByVal TensorType getInferred();
+    public static native @SharedPtr("c10::TensorType") @ByVal TensorType getInferred();
 
     // this property is used by GuardElimination
     // please see `checkInputs` for more details
     public native @Cast("bool") boolean isSummarized();
 
-    public native @SharedPtr @ByVal TensorType withUndefined();
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withUndefined();
 
-    public native @SharedPtr @ByVal TensorType withPossiblyUndefined();
+    public native @SharedPtr("c10::TensorType") @ByVal TensorType withPossiblyUndefined();
 
     public native @ByVal BoolOptional undefined();
 
-    public static native @Const @SharedPtr @ByRef TensorType get();
+    public static native @Const @SharedPtr("c10::TensorType") @ByRef TensorType get();
 
     @MemberGetter public static native TypeKind Kind();
 
     public static native @ByVal @Cast("std::vector<int64_t>*") LongVector contiguousStridesOf(
-        @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef in_sizes,
+        @ByVal LongArrayRef in_sizes,
         @ByVal(nullValue = "at::MemoryFormat(c10::MemoryFormat::Contiguous)") MemoryFormat memory_format);
     public static native @ByVal @Cast("std::vector<int64_t>*") LongVector contiguousStridesOf(
-        @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef in_sizes);
+        @ByVal LongArrayRef in_sizes);
     public static native @ByVal @Cast("std::vector<int64_t>*") LongVector contiguousStridesOf(
         @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] in_sizes,
         @ByVal(nullValue = "at::MemoryFormat(c10::MemoryFormat::Contiguous)") MemoryFormat memory_format);
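The change to TensorType is mechanical: @SharedPtr now carries the C++ type name ("c10::TensorType") so the generated JNI glue uses the right shared_ptr type, and the redundant @Cast on LongArrayRef parameters is dropped. Java call sites keep the same shape. A minimal sketch using only methods visible in this hunk (the tensor contents are illustrative, not from the patch):

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class TensorTypeSketch {
        public static void main(String[] args) {
            Tensor t = ones(2, 3);                    // small example tensor
            TensorType type = TensorType.create(t);   // static type describing t
            System.out.println(type.matchTensor(t));  // t conforms to its own type
            // Derived types come from the with*/dimensionedOnly methods above.
            TensorType dimOnly = type.dimensionedOnly();
            System.out.println(dimOnly.isSummarized());
        }
    }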
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java
index 9bd203c3557..b75c1ca4d45 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVector.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -15,7 +17,7 @@ import static org.bytedeco.pytorch.global.torch.*;
 
-@Name("std::vector<at::Tensor>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("std::vector<torch::Tensor>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
 public class TensorVector extends Pointer {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
@@ -26,13 +28,15 @@ public class TensorVector extends Pointer {
     public TensorVector(long n) { allocate(n); }
     private native void allocate();
     private native void allocate(@Cast("size_t") long n);
-    public native @Name("operator =") @ByRef TensorVector put(@ByRef @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector x);
+    public native @Name("operator =") @ByRef TensorVector put(@ByRef @Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector x);
 
     public boolean empty() { return size() == 0; }
     public native long size();
     public void clear() { resize(0); }
     public native void resize(@Cast("size_t") long n);
 
+    public Tensor front() { return get(0); }
+    public Tensor back() { return get(size() - 1); }
     @Index(function = "at") public native @ByRef Tensor get(@Cast("size_t") long i);
     public native TensorVector put(@Cast("size_t") long i, Tensor value);
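front() and back() are new Java-side conveniences that simply delegate to get(0) and get(size() - 1), mirroring std::vector::front/back. For example (tensor shapes are illustrative):

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class TensorVectorSketch {
        public static void main(String[] args) {
            TensorVector v = new TensorVector(2);   // wraps std::vector<torch::Tensor>
            v.put(0, ones(2, 2));
            v.put(1, zeros(3, 3));
            System.out.println(v.front().size(0));  // 2, same as v.get(0)
            System.out.println(v.back().size(0));   // 3, same as v.get(v.size() - 1)
        }
    }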
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java
index 62415a0417c..36865355332 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorOptional.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -15,18 +17,19 @@ import static org.bytedeco.pytorch.global.torch.*;
 
-@NoOffset @Name("c10::optional<std::vector<at::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("c10::optional<std::vector<torch::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
 public class TensorVectorOptional extends Pointer {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public TensorVectorOptional(Pointer p) { super(p); }
-    public TensorVectorOptional(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value) { this(); put(value); }
+    public TensorVectorOptional(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector value) { this(); put(value); }
     public TensorVectorOptional() { allocate(); }
     private native void allocate();
     public native @Name("operator =") @ByRef TensorVectorOptional put(@ByRef TensorVectorOptional x);
 
     public native boolean has_value();
-    public native @Name("value") @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get();
-    @ValueSetter public native TensorVectorOptional put(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value);
+    public native void reset();
+    public native @Name("value") @Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector get();
+    @ValueSetter public native TensorVectorOptional put(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector value);
 }
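The newly exposed reset() clears the wrapped c10::optional, after which has_value() reports false; get() should only be called while has_value() is true. A short sketch:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class TensorVectorOptionalSketch {
        public static void main(String[] args) {
            TensorVector v = new TensorVector(1);
            v.put(0, ones(4));
            TensorVectorOptional opt = new TensorVectorOptional(v);
            if (opt.has_value()) {
                System.out.println(opt.get().size()); // 1
            }
            opt.reset();                              // empties the optional, like C++ reset()
            System.out.println(opt.has_value());      // false
        }
    }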
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorTensorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorTensorTuple.java
deleted file mode 100644
index d74b476e465..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorTensorTuple.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@NoOffset @Name("std::tuple<std::vector<at::Tensor>,at::Tensor>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TensorVectorTensorTuple extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public TensorVectorTensorTuple(Pointer p) { super(p); }
-    public TensorVectorTensorTuple(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value0, @ByRef Tensor value1) { allocate(value0, value1); }
-    private native void allocate(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value0, @ByRef Tensor value1);
-    public TensorVectorTensorTuple() { allocate(); }
-    private native void allocate();
-    public native @Name("operator =") @ByRef TensorVectorTensorTuple put(@ByRef TensorVectorTensorTuple x);
-
-    public @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get0() { return get0(this); }
-    @Namespace @Name("std::get<0>") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get0(@ByRef TensorVectorTensorTuple container);
-    public @ByRef Tensor get1() { return get1(this); }
-    @Namespace @Name("std::get<1>") public static native @ByRef Tensor get1(@ByRef TensorVectorTensorTuple container);
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple.java
deleted file mode 100644
index 8f087819efe..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple.java
+++ /dev/null
@@ -1,40 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@NoOffset @Name("std::tuple<std::vector<at::Tensor>,std::vector<at::Tensor>,std::vector<at::Tensor>,std::vector<at::Tensor>,std::vector<at::Tensor> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple(Pointer p) { super(p); }
-    public TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value0, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value1, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value2, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value3, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value4) { allocate(value0, value1, value2, value3, value4); }
-    private native void allocate(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value0, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value1, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value2, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value3, @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector value4);
-    public TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple() { allocate(); }
-    private native void allocate();
-    public native @Name("operator =") @ByRef TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple put(@ByRef TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple x);
-
-    public @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get0() { return get0(this); }
-    @Namespace @Name("std::get<0>") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get0(@ByRef TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple container);
-    public @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get1() { return get1(this); }
-    @Namespace @Name("std::get<1>") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get1(@ByRef TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple container);
-    public @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get2() { return get2(this); }
-    @Namespace @Name("std::get<2>") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get2(@ByRef TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple container);
-    public @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get3() { return get3(this); }
-    @Namespace @Name("std::get<3>") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get3(@ByRef TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple container);
-    public @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get4() { return get4(this); }
-    @Namespace @Name("std::get<4>") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector get4(@ByRef TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple container);
-}
-
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TernaryIf.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TernaryIf.java
index 334a9594f5b..e862a162b79 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TernaryIf.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TernaryIf.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -19,9 +21,11 @@
 
 @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
 public class TernaryIf extends Expr {
     static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public TernaryIf(Pointer p) { super(p); }
-    public TernaryIf(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); }
-    private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree);
+    public TernaryIf(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
+    private native void allocate(@Const @ByRef TreeRef tree);
     public native @ByVal Expr cond();
     public native @ByVal Expr true_expr();
     public native @ByVal Expr false_expr();
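TernaryIf's constructor now takes the dedicated TreeRef wrapper instead of a raw Pointer behind a @Cast. A hedged sketch of inspecting such a node; the TreeRef is assumed to come from the torch::jit parser elsewhere, and the method name here is hypothetical:

    import org.bytedeco.pytorch.*;

    public class TernaryIfSketch {
        // `tree` is assumed to be a ternary-if node produced by the TorchScript parser.
        static void dump(TreeRef tree) {
            TernaryIf ternary = new TernaryIf(tree);
            Expr cond = ternary.cond();
            Expr whenTrue = ternary.true_expr();
            Expr whenFalse = ternary.false_expr();
            System.out.println(cond + " ? " + whenTrue + " : " + whenFalse);
        }
    }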
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadIdGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadIdGuard.java
index 4ade8ff3c3b..405fed233c6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadIdGuard.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadIdGuard.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalDebugInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalDebugInfo.java
index 9534a4f1f34..05c219809e3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalDebugInfo.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalDebugInfo.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -47,8 +49,7 @@ public class ThreadLocalDebugInfo extends Pointer {
     public static native @SharedPtr ThreadLocalDebugInfo current();
 
     // Internal, use DebugInfoGuard/ThreadLocalStateGuard
-    public static native void _forceCurrentDebugInfo(
-        @SharedPtr ThreadLocalDebugInfo info);
+
 
     // Push debug info struct of a given kind
     public static native void _push(DebugInfoKind kind, @SharedPtr DebugInfoBase info);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalPythonObjects.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalPythonObjects.java
new file mode 100644
index 00000000000..98a4101ee4a
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalPythonObjects.java
@@ -0,0 +1,48 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("at::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ThreadLocalPythonObjects extends Pointer {
+    static { Loader.load(); }
+    /** Default native constructor. */
+    public ThreadLocalPythonObjects() { super((Pointer)null); allocate(); }
+    /** Native array allocator. Access with {@link Pointer#position(long)}. */
+    public ThreadLocalPythonObjects(long size) { super((Pointer)null); allocateArray(size); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public ThreadLocalPythonObjects(Pointer p) { super(p); }
+    private native void allocate();
+    private native void allocateArray(long size);
+    @Override public ThreadLocalPythonObjects position(long position) {
+        return (ThreadLocalPythonObjects)super.position(position);
+    }
+    @Override public ThreadLocalPythonObjects getPointer(long i) {
+        return new ThreadLocalPythonObjects((Pointer)this).offsetAddress(i);
+    }
+
+    public static native void set(@StdString BytePointer key, @SharedPtr SafePyObject value);
+    public static native void set(@StdString String key, @SharedPtr SafePyObject value);
+    public static native @SharedPtr SafePyObject get(@StdString BytePointer key);
+    public static native @SharedPtr SafePyObject get(@StdString String key);
+    public static native @Cast("bool") boolean contains(@StdString BytePointer key);
+    public static native @Cast("bool") boolean contains(@StdString String key);
+
+    public static native @Const @ByRef ThreadLocalPythonObjects get_state();
+    public static native void set_state(@ByVal ThreadLocalPythonObjects state);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalState.java
index 3d8301b3641..7b09da0c250 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalState.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateGuard.java
index 84e49325f71..99862c61129 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateGuard.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateGuard.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
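ThreadLocalPythonObjects, the new class mapped above, appears to be a static key-value registry of Python objects kept in thread-local storage. A hedged sketch of its use from Java; the key name is illustrative, and the SafePyObject is assumed to be obtained elsewhere (it wraps a Python object, which normally originates on the Python side):

    import org.bytedeco.pytorch.*;

    public class ThreadLocalPythonObjectsSketch {
        // `obj` is assumed; keys are plain strings.
        static void stash(SafePyObject obj) {
            ThreadLocalPythonObjects.set("example_key", obj);
            if (ThreadLocalPythonObjects.contains("example_key")) {
                SafePyObject back = ThreadLocalPythonObjects.get("example_key");
                System.out.println(back != null);
            }
        }
    }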
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateOptional.java
index c78a878a791..d6ff895ddc8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThreadLocalStateOptional.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -26,6 +28,7 @@ public class ThreadLocalStateOptional extends Pointer {
     public native @Name("operator =") @ByRef ThreadLocalStateOptional put(@ByRef ThreadLocalStateOptional x);
 
     public native boolean has_value();
+    public native void reset();
     public native @Name("value") @ByRef ThreadLocalState get();
     @ValueSetter public native ThreadLocalStateOptional put(@ByRef ThreadLocalState value);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Threshold.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Threshold.java
deleted file mode 100644
index 8f582166687..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Threshold.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-/** A {@code ModuleHolder} subclass for {@code ThresholdImpl}.
- *  See the documentation for {@code ThresholdImpl} class to learn what methods it
- *  provides, and examples of how to use {@code Threshold} with
- *  {@code torch::nn::ThresholdOptions}. See the documentation for {@code ModuleHolder} to
- *  learn about PyTorch's module storage semantics. */
-@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class Threshold extends ThresholdImplModuleHolder {
-    static { Loader.load(); }
-
-    public Threshold(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-    private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Threshold(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::ThresholdImpl>"}) ThresholdImpl module) { super((Pointer)null); allocate(module); }
-    private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::ThresholdImpl>"}) ThresholdImpl module);
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public Threshold(Pointer p) { super(p); }
-
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java
index a0ffe58e7ad..9cb48dfd47b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImpl.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -36,9 +38,9 @@ public class ThresholdImpl extends ThresholdImplCloneable {
     public ThresholdImpl(Pointer p) { super(p); }
 
     public ThresholdImpl(double threshold, double value) { super((Pointer)null); allocate(threshold, value); }
-    @NoDeallocator private native void allocate(double threshold, double value);
+    @SharedPtr private native void allocate(double threshold, double value);
     public ThresholdImpl(@Const @ByRef ThresholdOptions options_) { super((Pointer)null); allocate(options_); }
-    @NoDeallocator private native void allocate(@Const @ByRef ThresholdOptions options_);
+    @SharedPtr private native void allocate(@Const @ByRef ThresholdOptions options_);
 
     public native @ByVal Tensor forward(@ByVal Tensor input);
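With this patch the ModuleHolder wrappers (such as the Threshold class deleted above) are removed, and the *Impl classes are used directly; allocate() is now annotated @SharedPtr, so the Java object appears to share ownership with the underlying C++ shared_ptr. A minimal sketch using only the constructor and forward() shown in this hunk (the threshold and value numbers are illustrative):

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ThresholdSketch {
        public static void main(String[] args) {
            ThresholdImpl m = new ThresholdImpl(/*threshold=*/0.1, /*value=*/20.0);
            Tensor y = m.forward(randn(4));  // values at or below 0.1 become 20.0
            System.out.println(y.size(0));   // 4
        }
    }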
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java
index 21db416fb18..3a77b49fe07 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -20,18 +22,18 @@ public class ThresholdImplCloneable extends Module {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public ThresholdImplCloneable(Pointer p) { super(p); }
+    @Override public Module asModule() { return asModule(this); }
+    @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast<torch::nn::Module,torch::nn::Cloneable<torch::nn::ThresholdImpl>>") Module asModule(@SharedPtr ThresholdImplCloneable pointer);
 
     /** {@code reset()} must perform initialization of all members with reference
      *  semantics, most importantly parameters, buffers and submodules. */
     public native void reset();
 
-    @Override public Module asModule() { return asModule(this); }
-    @Namespace public static native @Name("static_cast<torch::nn::Module*>") Module asModule(ThresholdImplCloneable module);
 
     /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters
      *  and submodules in the cloned module are different from those in the
      *  original module. */
-    public native @SharedPtr Module clone(
-        @Const @ByRef(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
-    public native @SharedPtr Module clone();
+    public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
+        @Const @ByRef(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
+    public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplModuleHolder.java
deleted file mode 100644
index a36425ba89a..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplModuleHolder.java
+++ /dev/null
@@ -1,79 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@Name("torch::nn::ModuleHolder<torch::nn::ThresholdImpl>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class ThresholdImplModuleHolder extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public ThresholdImplModuleHolder(Pointer p) { super(p); }
-
-
-    ///
-
-    /** Default constructs the contained module if if has a default constructor,
-     *  else produces a static error.
-     *
-     *  NOTE: This uses the behavior of template
-     *  classes in C++ that constructors (or any methods) are only compiled when
-     *  actually used. */
-
-
-    /** Constructs the {@code ModuleHolder} with an empty contained value. Access to
-     *  the underlying module is not permitted and will throw an exception, until
-     *  a value is assigned. */
-    /* implicit */ public ThresholdImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
-private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0);
-
-    /** Constructs the {@code ModuleHolder} with a contained module, forwarding all
-     *  arguments to its constructor. */
-
-    /** Constructs the {@code ModuleHolder} from a pointer to the contained type.
-     *  Example: {@code Linear(std::make_shared<LinearImpl>(...))}. */
-    /* implicit */ public ThresholdImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::ThresholdImpl>"}) ThresholdImpl module) { super((Pointer)null); allocate(module); }
-private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::ThresholdImpl>"}) ThresholdImpl module);
-
-    /** Returns true if the {@code ModuleHolder} contains a module, or false if it is
-     *  {@code nullptr}. */
-    public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean();
-
-    /** Forwards to the contained module. */
-    public native @Name("operator ->") ThresholdImpl access();
-
-    /** Forwards to the contained module. */
-
-    /** Returns a reference to the contained module. */
-    public native @ByRef @Name("operator *") ThresholdImpl multiply();
-
-    /** Returns a const reference to the contained module. */
-
-    /** Returns a shared pointer to the underlying module. */
-    public native @SharedPtr @Cast({"", "std::shared_ptr<torch::nn::ThresholdImpl>"}) ThresholdImpl ptr();
-
-    /** Returns a pointer to the underlying module. */
-    public native ThresholdImpl get();
-
-    /** Returns a const pointer to the underlying module. */
-
-    /** Calls the {@code forward()} method of the contained module. */
-
-    /** Forwards to the subscript operator of the contained module.
-     *  NOTE: std::forward is qualified to prevent VS2017 emitting
-     *  error C2872: 'std': ambiguous symbol */
-
-    /** Returns true if the {@code ModuleHolder} does not contain a module. */
-    public native @Cast("bool") @NoException(true) boolean is_empty();
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdOptions.java
index c04fe0d8143..2150401481c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdOptions.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Token.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Token.java
index 3ed62020379..34a9fe12e2d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Token.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Token.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TokenTrie.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TokenTrie.java
deleted file mode 100644
index 6ae17004207..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TokenTrie.java
+++ /dev/null
@@ -1,41 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TokenTrie extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public TokenTrie(Pointer p) { super(p); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public TokenTrie(long size) { super((Pointer)null); allocateArray(size); }
-    private native void allocateArray(long size);
-    @Override public TokenTrie position(long position) {
-        return (TokenTrie)super.position(position);
-    }
-    @Override public TokenTrie getPointer(long i) {
-        return new TokenTrie((Pointer)this).offsetAddress(i);
-    }
-
-    public TokenTrie() { super((Pointer)null); allocate(); }
-    private native void allocate();
-    public native void insert(@Cast("const char*") BytePointer str, int tok);
-    public native void insert(String str, int tok);
-    @MemberGetter public native int kind(); // 0 == invalid token
-
-    @MemberGetter public native @Cast("char*") @StdVector BytePointer child_chars();
-    @MemberGetter public native @ByRef TokenTrieVector child_tries();
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TorchDispatchModeTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TorchDispatchModeTLS.java
new file mode 100644
index 00000000000..f82dbbb962f
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TorchDispatchModeTLS.java
@@ -0,0 +1,46 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("c10::impl") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TorchDispatchModeTLS extends Pointer {
+    static { Loader.load(); }
+    /** Default native constructor. */
+    public TorchDispatchModeTLS() { super((Pointer)null); allocate(); }
+    /** Native array allocator. Access with {@link Pointer#position(long)}. */
+    public TorchDispatchModeTLS(long size) { super((Pointer)null); allocateArray(size); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public TorchDispatchModeTLS(Pointer p) { super(p); }
+    private native void allocate();
+    private native void allocateArray(long size);
+    @Override public TorchDispatchModeTLS position(long position) {
+        return (TorchDispatchModeTLS)super.position(position);
+    }
+    @Override public TorchDispatchModeTLS getPointer(long i) {
+        return new TorchDispatchModeTLS((Pointer)this).offsetAddress(i);
+    }
+
+    public static native void push_onto_stack(@SharedPtr SafePyObject mode);
+    public static native @SharedPtr SafePyObject pop_stack();
+    public static native @SharedPtr SafePyObject get_stack_at(@Cast("int64_t") long idx);
+    public static native @Cast("int64_t") long stack_len();
+
+    public static native @Const @ByRef TorchDispatchModeTLS get_state();
+    public static native void set_state(@ByVal TorchDispatchModeTLS state);
+}
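TorchDispatchModeTLS, the new class mapped above, appears to expose the thread-local stack of Python dispatch modes through static methods. A hedged sketch; the SafePyObject mode is assumed to be supplied from elsewhere, since it wraps a Python-side object:

    import org.bytedeco.pytorch.*;

    public class DispatchModeSketch {
        // `mode` is assumed to be a valid dispatch-mode object.
        static void pushAndPop(SafePyObject mode) {
            TorchDispatchModeTLS.push_onto_stack(mode);
            long depth = TorchDispatchModeTLS.stack_len();
            SafePyObject top = TorchDispatchModeTLS.get_stack_at(depth - 1);
            SafePyObject popped = TorchDispatchModeTLS.pop_stack(); // removes `top`
            System.out.println(depth + " " + (popped != null));
        }
    }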
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TraceableFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TraceableFunction.java
index 386bf018014..fe47be3a7f9 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TraceableFunction.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TraceableFunction.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TracingState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TracingState.java
deleted file mode 100644
index 10303cd2ffb..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TracingState.java
+++ /dev/null
@@ -1,60 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-@Namespace("torch::jit::tracer") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class TracingState extends Pointer {
-    static { Loader.load(); }
-    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-    public TracingState(Pointer p) { super(p); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public TracingState(long size) { super((Pointer)null); allocateArray(size); }
-    private native void allocateArray(long size);
-    @Override public TracingState position(long position) {
-        return (TracingState)super.position(position);
-    }
-    @Override public TracingState getPointer(long i) {
-        return new TracingState((Pointer)this).offsetAddress(i);
-    }
-
-    public TracingState() { super((Pointer)null); allocate(); }
-    private native void allocate();
-
-    // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
-    public native @SharedPtr @ByRef Graph graph(); public native TracingState graph(Graph setter);
-    // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
-    public native @Cast("bool") boolean warn(); public native TracingState warn(boolean setter);
-    // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
-    public native @Cast("bool") boolean strict(); public native TracingState strict(boolean setter);
-    // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
-    public native @Cast("bool") boolean force_outplace(); public native TracingState force_outplace(boolean setter);
-    // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
-
-
-    public native void enterFrame();
-
-    public native void leaveFrame();
-
-    public native void setValue(@Const @ByRef IValue v, Value value);
-    public native void delValue(@Const @ByRef IValue var);
-    public native Value getValue(@Const @ByRef IValue var);
-    public native Value getOutput(@Const @ByRef IValue var, @Cast("size_t") long i);
-    public native @Cast("bool") boolean hasValue(@Const @ByRef IValue var);
-
-    public native JitNode createNode(@ByVal Symbol op_name, @Cast("size_t") long num_outputs);
-    public native void insertNode(JitNode node);
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Transformer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Transformer.java
deleted file mode 100644
index 38d9932ec88..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Transformer.java
+++ /dev/null
@@ -1,35 +0,0 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.Module;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-/** A {@code ModuleHolder} subclass for {@code TransformerImpl}.
- *  See the documentation for {@code TransformerImpl} class to learn what
- *  methods it provides, and examples of how to use {@code Transformer} with
- *  {@code torch::nn::TransformerOptions}.
- *  See the documentation for {@code ModuleHolder} to learn about PyTorch's
- *  module storage semantics. */
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Transformer extends TransformerImplModuleHolder { - static { Loader.load(); } - - public Transformer(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Transformer(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Transformer(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoder.java deleted file mode 100644 index e69d1b7a56f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoder.java +++ /dev/null @@ -1,35 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code TransformerDecoderImpl}. - * See the documentation for {@code TransformerDecoderImpl} class to learn what - * methods it provides, and examples of how to use {@code TransformerDecoder} with - * {@code torch::nn::TransformerDecoderOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TransformerDecoder extends TransformerDecoderImplModuleHolder { - static { Loader.load(); } - - public TransformerDecoder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public TransformerDecoder(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
-    public TransformerDecoder(Pointer p) { super(p); }
-
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java
index b21c87c646e..76cdd2dda20 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImpl.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.pytorch;
 
 import org.bytedeco.pytorch.Allocator;
 import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.functions.*;
 import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
 import java.nio.*;
 import org.bytedeco.javacpp.*;
 import org.bytedeco.javacpp.annotation.*;
@@ -42,14 +44,8 @@ public class TransformerDecoderImpl extends TransformerDecoderImplCloneable {
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public TransformerDecoderImpl(Pointer p) { super(p); }
 
-    public TransformerDecoderImpl(
-        @ByVal TransformerDecoderLayer decoder_layer,
-        @Cast("int64_t") long num_layers) { super((Pointer)null); allocate(decoder_layer, num_layers); }
-    @NoDeallocator private native void allocate(
-        @ByVal TransformerDecoderLayer decoder_layer,
-        @Cast("int64_t") long num_layers);
     public TransformerDecoderImpl(@ByVal TransformerDecoderOptions options_) { super((Pointer)null); allocate(options_); }
-    @NoDeallocator private native void allocate(@ByVal TransformerDecoderOptions options_);
+    @SharedPtr private native void allocate(@ByVal TransformerDecoderOptions options_);
 
     public native void reset();
 
@@ -67,10 +63,10 @@ public TransformerDecoderImpl(
     public native @ByVal Tensor forward(
         @Const @ByRef Tensor tgt,
         @Const @ByRef Tensor memory,
-        @Const @ByRef(nullValue = "at::Tensor{}") Tensor tgt_mask,
-        @Const @ByRef(nullValue = "at::Tensor{}") Tensor memory_mask,
-        @Const @ByRef(nullValue = "at::Tensor{}") Tensor tgt_key_padding_mask,
-        @Const @ByRef(nullValue = "at::Tensor{}") Tensor memory_key_padding_mask);
+        @Const @ByRef(nullValue = "torch::Tensor{}") Tensor tgt_mask,
+        @Const @ByRef(nullValue = "torch::Tensor{}") Tensor memory_mask,
+        @Const @ByRef(nullValue = "torch::Tensor{}") Tensor tgt_key_padding_mask,
+        @Const @ByRef(nullValue = "torch::Tensor{}") Tensor memory_key_padding_mask);
     public native @ByVal Tensor forward(
         @Const @ByRef Tensor tgt,
         @Const @ByRef Tensor memory);
@@ -79,7 +75,6 @@ public TransformerDecoderImpl(
     public native @ByRef TransformerDecoderOptions options(); public native TransformerDecoderImpl options(TransformerDecoderOptions setter);
 
     /** Cloned layers of decoder layers */
-    public native @ByRef ModuleList layers(); public native TransformerDecoderImpl layers(ModuleList setter);
 
     /** optional layer normalization module */
     public native @ByRef AnyModule norm(); public native TransformerDecoderImpl norm(AnyModule setter);
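With the two-argument constructor removed, the decoder is now always configured through TransformerDecoderOptions, and the mask defaults switch from at::Tensor{} to torch::Tensor{} (the same empty-tensor sentinel under the new naming). A hedged sketch; the options object and the input tensors are assumed to be built elsewhere, mirroring the torch::nn C++ API:

    import org.bytedeco.pytorch.*;

    public class DecoderSketch {
        // `options` is assumed (e.g. derived from a decoder layer and a layer count);
        // `tgt` and `memory` are the target and encoder-memory sequences.
        static Tensor decode(TransformerDecoderOptions options, Tensor tgt, Tensor memory) {
            TransformerDecoderImpl decoder = new TransformerDecoderImpl(options);
            // The two-argument overload leaves all four masks at their
            // torch::Tensor{} defaults shown in the hunk above.
            return decoder.forward(tgt, memory);
        }
    }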
package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class TransformerDecoderImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerDecoderImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerDecoderImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(TransformerDecoderImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplModuleHolder.java deleted file mode 100644 index 8f3411dc865..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TransformerDecoderImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TransformerDecoderImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned.
*/ - /* implicit */ public TransformerDecoderImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public TransformerDecoderImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") TransformerDecoderImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") TransformerDecoderImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native TransformerDecoderImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayer.java deleted file mode 100644 index 9b10451359c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayer.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code TransformerDecoderLayerImpl}. - * See the documentation for {@code TransformerDecoderLayerImpl} class to learn what - * methods it provides, and examples of how to use {@code TransformerDecoderLayer} - * with {@code torch::nn::TransformerDecoderLayerOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TransformerDecoderLayer extends TransformerDecoderLayerImplModuleHolder { - static { Loader.load(); } - - public TransformerDecoderLayer(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public TransformerDecoderLayer(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TransformerDecoderLayer(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java index 18e70036f28..7b663b2cd3b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -44,9 +46,9 @@ public class TransformerDecoderLayerImpl extends TransformerDecoderLayerImplClon public TransformerDecoderLayerImpl(Pointer p) { super(p); } public TransformerDecoderLayerImpl(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead) { super((Pointer)null); allocate(d_model, nhead); } - @NoDeallocator private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); + @SharedPtr private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); public TransformerDecoderLayerImpl(@ByVal TransformerDecoderLayerOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal TransformerDecoderLayerOptions options_); + @SharedPtr private native void allocate(@ByVal TransformerDecoderLayerOptions options_); public native void reset(); @@ -64,10 +66,10 @@ public class TransformerDecoderLayerImpl extends TransformerDecoderLayerImplClon public native @ByVal Tensor forward( @ByVal Tensor tgt, @Const @ByRef Tensor memory, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor tgt_mask, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor memory_mask, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor tgt_key_padding_mask, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor memory_key_padding_mask); + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor tgt_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor memory_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor tgt_key_padding_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor memory_key_padding_mask); public native @ByVal Tensor forward( @ByVal Tensor tgt, @Const @ByRef Tensor memory); @@ -76,35 +78,24 @@ public class TransformerDecoderLayerImpl extends TransformerDecoderLayerImplClon public native @ByRef TransformerDecoderLayerOptions options(); public native 
TransformerDecoderLayerImpl options(TransformerDecoderLayerOptions setter); /** self attention */ - public native @ByRef MultiheadAttention self_attn(); public native TransformerDecoderLayerImpl self_attn(MultiheadAttention setter); /** Dropout, post self attention */ - public native @ByRef Dropout dropout1(); public native TransformerDecoderLayerImpl dropout1(Dropout setter); /** Normalization, post self attention */ - public native @ByRef LayerNorm norm1(); public native TransformerDecoderLayerImpl norm1(LayerNorm setter); /** Multi-headed attention */ - public native @ByRef MultiheadAttention multihead_attn(); public native TransformerDecoderLayerImpl multihead_attn(MultiheadAttention setter); /** Dropout, post multi-headed attention */ - public native @ByRef Dropout dropout2(); public native TransformerDecoderLayerImpl dropout2(Dropout setter); /** Normalization, post multi-headed attention */ - public native @ByRef LayerNorm norm2(); public native TransformerDecoderLayerImpl norm2(LayerNorm setter); /** Feed forward first linear layer */ - public native @ByRef Linear linear1(); public native TransformerDecoderLayerImpl linear1(Linear setter); /** Feed forward dropout layer */ - public native @ByRef Dropout dropout(); public native TransformerDecoderLayerImpl dropout(Dropout setter); /** Feed forward second linear layer */ - public native @ByRef Linear linear2(); public native TransformerDecoderLayerImpl linear2(Linear setter); /** Dropout, post feed forward */ - public native @ByRef Dropout dropout3(); public native TransformerDecoderLayerImpl dropout3(Dropout setter); /** Normalization, post feed forward */ - public native @ByRef LayerNorm norm3(); public native TransformerDecoderLayerImpl norm3(LayerNorm setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java index 32cfcc00a45..b517ae6423a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class TransformerDecoderLayerImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerDecoderLayerImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerDecoderLayerImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. 
*/ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(TransformerDecoderLayerImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplModuleHolder.java deleted file mode 100644 index 7b46c3168d5..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TransformerDecoderLayerImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TransformerDecoderLayerImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public TransformerDecoderLayerImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public TransformerDecoderLayerImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}.
*/ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") TransformerDecoderLayerImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") TransformerDecoderLayerImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerDecoderLayerImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native TransformerDecoderLayerImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerOptions.java index c652313ea66..75ced2978c4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -37,5 +39,5 @@ public class TransformerDecoderLayerOptions extends Pointer { public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer nhead(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer dim_feedforward(); public native @ByRef @NoException(true) DoublePointer dropout(); - public native @ByRef @NoException(true) transformer_activation_t activation(); + public native @ByRef @NoException(true) TransformerActivation activation(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderOptions.java index 81daa339b03..43f8b2ad6c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,12 +35,6 @@ public class TransformerDecoderOptions extends Pointer { // This constructor will keep a ref of passed in decoder_layer, // so it keeps all the data in decoder_layer.
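The decoder_layer-keeping constructor described just above is the one deleted below; building from TransformerDecoderLayerOptions is the only remaining path. A further consequence of the @ByRef(nullValue = "torch::Tensor{}") defaults in the Impl hunks earlier is that Java callers may pass null for any mask they want left empty. A hedged sketch of the two generated forward() call shapes; the class name, tensor shapes, and the ones(...).tril() placeholder mask are illustrative only:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class DecoderLayerForwardSketch {
        public static void main(String[] args) {
            TransformerDecoderLayerImpl layer =
                    new TransformerDecoderLayerImpl(512, 8); // (d_model, nhead) ctor kept in this patch
            Tensor tgt = rand(10, 32, 512);
            Tensor memory = rand(20, 32, 512);
            Tensor out1 = layer.forward(tgt, memory); // short overload: every mask empty
            Tensor out2 = layer.forward(tgt, memory,
                    ones(10, 10).tril(), // tgt_mask, illustrative placeholder
                    null,                // memory_mask -> torch::Tensor{}
                    null,                // tgt_key_padding_mask
                    null);               // memory_key_padding_mask
        }
    }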
- public TransformerDecoderOptions( - @ByVal TransformerDecoderLayer decoder_layer, - @Cast("int64_t") long num_layers) { super((Pointer)null); allocate(decoder_layer, num_layers); } - private native void allocate( - @ByVal TransformerDecoderLayer decoder_layer, - @Cast("int64_t") long num_layers); // This constructor will create a new TransformerDecoderLayer obj, // based on passed in decoder_layer_options. public TransformerDecoderOptions( @@ -47,7 +43,6 @@ public TransformerDecoderOptions( private native void allocate( @Const @ByRef TransformerDecoderLayerOptions decoder_layer_options, @Cast("int64_t") long num_layers); - public native @ByRef @NoException(true) TransformerDecoderLayer decoder_layer(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer num_layers(); public native @ByRef @NoException(true) AnyModule norm(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoder.java deleted file mode 100644 index 8d61214bfbe..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoder.java +++ /dev/null @@ -1,35 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code TransformerEncoderImpl}. - * See the documentation for {@code TransformerEncoderImpl} class to learn what - * methods it provides, and examples of how to use {@code TransformerEncoder} with - * {@code torch::nn::TransformerEncoderOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TransformerEncoder extends TransformerEncoderImplModuleHolder { - static { Loader.load(); } - - public TransformerEncoder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public TransformerEncoder(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public TransformerEncoder(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java index 6d8fc089813..6e014e47e4f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -40,19 +42,13 @@ public class TransformerEncoderImpl extends TransformerEncoderImplCloneable { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderImpl(Pointer p) { super(p); } - public TransformerEncoderImpl( - @ByVal TransformerEncoderLayer encoder_layer, - @Cast("int64_t") long num_layers) { super((Pointer)null); allocate(encoder_layer, num_layers); } - @NoDeallocator private native void allocate( - @ByVal TransformerEncoderLayer encoder_layer, - @Cast("int64_t") long num_layers); public TransformerEncoderImpl(@ByVal TransformerEncoderOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal TransformerEncoderOptions options_); + @SharedPtr private native void allocate(@ByVal TransformerEncoderOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor src, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor src_mask, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor src_key_padding_mask); + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor src_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor src_key_padding_mask); public native @ByVal Tensor forward( @Const @ByRef Tensor src); @@ -63,7 +59,6 @@ public TransformerEncoderImpl( public native @ByRef TransformerEncoderOptions options(); public native TransformerEncoderImpl options(TransformerEncoderOptions setter); /** module list that contains all the encoder layers */ - public native @ByRef ModuleList layers(); public native TransformerEncoderImpl layers(ModuleList setter); /** optional normalization module */ public native @ByRef AnyModule norm(); public native TransformerEncoderImpl norm(AnyModule setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java index 075902251af..6acc934eab8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class TransformerEncoderImplCloneable extends Module { static { 
Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerEncoderImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(TransformerEncoderImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplModuleHolder.java deleted file mode 100644 index 53d3a655426..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TransformerEncoderImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TransformerEncoderImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public TransformerEncoderImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}.
*/ - /* implicit */ public TransformerEncoderImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") TransformerEncoderImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") TransformerEncoderImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native TransformerEncoderImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayer.java deleted file mode 100644 index bf92697a259..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayer.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code TransformerEncoderLayerImpl}. - * See the documentation for {@code TransformerEncoderLayerImpl} class to learn what - * methods it provides, and examples of how to use {@code TransformerEncoderLayer} - * with {@code torch::nn::TransformerEncoderLayerOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TransformerEncoderLayer extends TransformerEncoderLayerImplModuleHolder { - static { Loader.load(); } - - public TransformerEncoderLayer(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public TransformerEncoderLayer(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl module); - /** Pointer cast constructor.
Invokes {@link Pointer#Pointer(Pointer)}. */ - public TransformerEncoderLayer(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java index bd1e4e34cc9..a45806b1195 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,14 +41,14 @@ public class TransformerEncoderLayerImpl extends TransformerEncoderLayerImplClon public TransformerEncoderLayerImpl(Pointer p) { super(p); } public TransformerEncoderLayerImpl(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead) { super((Pointer)null); allocate(d_model, nhead); } - @NoDeallocator private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); + @SharedPtr private native void allocate(@Cast("int64_t") long d_model, @Cast("int64_t") long nhead); public TransformerEncoderLayerImpl(@ByVal TransformerEncoderLayerOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal TransformerEncoderLayerOptions options_); + @SharedPtr private native void allocate(@ByVal TransformerEncoderLayerOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor src, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor src_mask, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor src_key_padding_mask); + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor src_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor src_key_padding_mask); public native @ByVal Tensor forward( @Const @ByRef Tensor src); @@ -57,24 +59,16 @@ public class TransformerEncoderLayerImpl extends TransformerEncoderLayerImplClon public native @ByRef TransformerEncoderLayerOptions options(); public native TransformerEncoderLayerImpl options(TransformerEncoderLayerOptions setter); /** self attention */ - public native @ByRef MultiheadAttention self_attn(); public native TransformerEncoderLayerImpl self_attn(MultiheadAttention setter); /** feedforward first linear layer */ - public native @ByRef Linear linear1(); public native TransformerEncoderLayerImpl linear1(Linear setter); /** feedforward dropout layer */ - public native @ByRef Dropout dropout(); public native TransformerEncoderLayerImpl dropout(Dropout setter); /** feedforward second linear layer */ - public native @ByRef Linear linear2(); public native TransformerEncoderLayerImpl linear2(Linear setter); /** pre feedforward, normalization layer */ - public native @ByRef LayerNorm norm1(); public native TransformerEncoderLayerImpl norm1(LayerNorm setter); /** post feedforward, normalization layer */ - public native @ByRef LayerNorm norm2(); public native TransformerEncoderLayerImpl norm2(LayerNorm setter); /** pre feedforward, dropout layer */ - public native @ByRef Dropout dropout1(); public native TransformerEncoderLayerImpl dropout1(Dropout setter); /** post feedforward, dropout layer */ - public native @ByRef
Dropout dropout2(); public native TransformerEncoderLayerImpl dropout2(Dropout setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java index 50d72d50a42..6f777ba5e2a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class TransformerEncoderLayerImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerEncoderLayerImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerEncoderLayerImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(TransformerEncoderLayerImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplModuleHolder.java deleted file mode 100644 index 47586dc546c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TransformerEncoderLayerImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ - public TransformerEncoderLayerImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public TransformerEncoderLayerImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public TransformerEncoderLayerImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") TransformerEncoderLayerImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") TransformerEncoderLayerImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerEncoderLayerImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native TransformerEncoderLayerImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module.
*/ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerOptions.java index 1d98be7525f..9462c48248d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -34,5 +36,5 @@ public class TransformerEncoderLayerOptions extends Pointer { public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer nhead(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer dim_feedforward(); public native @ByRef @NoException(true) DoublePointer dropout(); - public native @ByRef @NoException(true) transformer_activation_t activation(); + public native @ByRef @NoException(true) TransformerActivation activation(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderOptions.java index 7e8590eef3d..d16041d01b3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -32,12 +34,6 @@ public class TransformerEncoderOptions extends Pointer { // This constructor will keep a shallow copy of encoder_layer, so it keeps all // the data in encoder_layer. - public TransformerEncoderOptions( - @ByVal TransformerEncoderLayer encoder_layer, - @Cast("int64_t") long num_layers) { super((Pointer)null); allocate(encoder_layer, num_layers); } - private native void allocate( - @ByVal TransformerEncoderLayer encoder_layer, - @Cast("int64_t") long num_layers); // This constructor will create a new TransformerEncoderLayer obj based on // passed in encoder_layer_options. 
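As on the decoder side, the encoder_layer-holder constructor disappears and the options-based constructor kept below becomes the single way to describe the stack. A short sketch under the same assumptions as the decoder example (hypothetical EncoderSketch class, illustrative shapes, and a TransformerEncoderLayerOptions(d_model, nhead) constructor assumed from libtorch):

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class EncoderSketch {
        public static void main(String[] args) {
            TransformerEncoderOptions opts = new TransformerEncoderOptions(
                    new TransformerEncoderLayerOptions(512, 8), 6);
            TransformerEncoderImpl encoder = new TransformerEncoderImpl(opts); // @SharedPtr allocation
            Tensor src = rand(20, 32, 512);    // (src_len, batch, d_model)
            Tensor out = encoder.forward(src); // src_mask and src_key_padding_mask default to empty
        }
    }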
public TransformerEncoderOptions( @@ -46,7 +42,6 @@ public TransformerEncoderOptions( private native void allocate( @Const @ByRef TransformerEncoderLayerOptions encoder_layer_options, @Cast("int64_t") long num_layers); - public native @ByRef @NoException(true) TransformerEncoderLayer encoder_layer(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer num_layers(); public native @ByRef @NoException(true) AnyModule norm(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java index d5b16ef8c58..1b8fb417fbe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -48,7 +50,7 @@ public class TransformerImpl extends TransformerImplCloneable { /// /// public TransformerImpl(@ByVal TransformerOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal TransformerOptions options_); + @SharedPtr private native void allocate(@ByVal TransformerOptions options_); /** forward function for Transformer Module * Args: @@ -102,12 +104,12 @@ public class TransformerImpl extends TransformerImplCloneable { public native @ByVal Tensor forward( @Const @ByRef Tensor src, @Const @ByRef Tensor tgt, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor src_mask, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor tgt_mask, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor memory_mask, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor src_key_padding_mask, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor tgt_key_padding_mask, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor memory_key_padding_mask); + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor src_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor tgt_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor memory_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor src_key_padding_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor tgt_key_padding_mask, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor memory_key_padding_mask); public native @ByVal Tensor forward( @Const @ByRef Tensor src, @Const @ByRef Tensor tgt); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java index a288f2c10c2..a84e89bfaf2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import 
org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class TransformerImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TransformerImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TransformerImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(TransformerImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplModuleHolder.java deleted file mode 100644 index 77261fdab0b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TransformerImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TransformerImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if it has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public TransformerImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type.
- * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public TransformerImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TransformerImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") TransformerImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") TransformerImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) TransformerImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native TransformerImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerOptions.java index 3de0f8ee107..bc8664fa9a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -61,7 +63,7 @@ private native void allocate( public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer num_decoder_layers(); public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer dim_feedforward(); public native @ByRef @NoException(true) DoublePointer dropout(); - public native @ByRef @NoException(true) transformer_activation_t activation(); + public native @ByRef @NoException(true) TransformerActivation activation(); public native @ByRef @NoException(true) AnyModule custom_encoder(); public native @ByRef @NoException(true) AnyModule custom_decoder(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tree.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tree.java index ace80e45102..facad669bc9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tree.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tree.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; 
+import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -28,8 +30,8 @@ public class Tree extends Pointer { public native @Cast("bool") boolean isAtom(); public native @Const @ByRef SourceRange range(); public native @StdString BytePointer stringValue(); - public native @Cast("const torch::jit::TreeList*") @ByRef Pointer trees(); - public native @Cast("const torch::jit::TreeRef*") @ByRef Pointer tree(@Cast("size_t") long i); + public native @Cast("const torch::jit::TreeList*") @ByRef SymDimVector trees(); + public native @Const @ByRef TreeRef tree(@Cast("size_t") long i); public native void matchNumSubtrees(int k, @Cast("size_t") long expected_subtrees); public native void matchNumSubtreesD( diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeList.java new file mode 100644 index 00000000000..cb3422040f8 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeList.java @@ -0,0 +1,51 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::SmallVector,4>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TreeList extends TreeRefSmallVectorImpl { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TreeList(Pointer p) { super(p); } + + public TreeList() { super((Pointer)null); allocate(); } + private native void allocate(); + + public TreeList(@Cast("size_t") long Size, @Const @ByRef(nullValue = "c10::intrusive_ptr()") TreeRef Value) { super((Pointer)null); allocate(Size, Value); } + private native void allocate(@Cast("size_t") long Size, @Const @ByRef(nullValue = "c10::intrusive_ptr()") TreeRef Value); + public TreeList(@Cast("size_t") long Size) { super((Pointer)null); allocate(Size); } + private native void allocate(@Cast("size_t") long Size); + + // note: The enable_if restricts Container to types that have a .begin() and + // .end() that return valid input iterators. + + public TreeList(@Const @ByRef TreeList RHS) { super((Pointer)null); allocate(RHS); } + private native void allocate(@Const @ByRef TreeList RHS); + + public native @ByRef @Name("operator =") TreeList put(@Const @ByRef TreeList RHS); + + // note: The enable_if restricts Container to types that have a .begin() and + // .end() that return valid input iterators. + + + + + + // note: The enable_if restricts Container to types that have a .begin() and + // .end() that return valid input iterators. 
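+ // Illustrative usage sketch (not generated code; `leaf` stands for a hypothetical
+ // TreeRef obtained elsewhere, e.g. from a parsed Tree):
+ //
+ //   TreeList children = new TreeList();      // empty small vector
+ //   children.push_back(leaf);                // inherited from TreeRefSmallVectorBase
+ //   TreeList three = new TreeList(3, leaf);  // three copies of the same TreeRef
+ //   three.put(children);                     // operator= is mapped to put()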
+} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRef.java new file mode 100644 index 00000000000..20df01df0fc --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRef.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TreeRef extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TreeRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public TreeRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public TreeRef position(long position) { + return (TreeRef)super.position(position); + } + @Override public TreeRef getPointer(long i) { + return new TreeRef((Pointer)this).offsetAddress(i); + } + + + public TreeRef() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public TreeRef(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. + // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public TreeRef(Tree target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(Tree target, @ByVal DontIncreaseRefcount arg1); + + + + public TreeRef(@ByRef(true) TreeRef rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) TreeRef rhs); + + public native @ByRef @Name("operator =") @NoException(true) TreeRef put(@ByRef(true) TreeRef rhs); + + public native @NoException(true) Tree get(); + + public native @ByRef @Name("operator *") @NoException(true) Tree multiply(); + + public native @Name("operator ->") @NoException(true) Tree access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef TreeRef rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. 
+ * You *must* put the returned pointer back into a intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) Tree release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal TreeRef reclaim(Tree owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal TreeRef reclaim_copy(Tree owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside a intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. + * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal TreeRef unsafe_steal_from_new(Tree raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal TreeRef unsafe_adapt_non_heap_allocated( + Tree raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. 
+ * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal TreeRef unsafe_reclaim_from_nonowning(Tree raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangePickler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorBase.java similarity index 52% rename from pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangePickler.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorBase.java index 0b4e00e6016..443b5c97060 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SourceRangePickler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorBase.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,13 +17,13 @@ import static org.bytedeco.pytorch.global.torch.*; - -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class SourceRangePickler extends Pointer { +@Name("c10::SmallVectorTemplateBase >") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TreeRefSmallVectorBase extends TreeRefSmallVectorCommon { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SourceRangePickler(Pointer p) { super(p); } + public TreeRefSmallVectorBase(Pointer p) { super(p); } + public native void push_back(@Const @ByRef TreeRef Elt); - + public native void pop_back(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorCommon.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorCommon.java new file mode 100644 index 00000000000..5a11aad8b86 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorCommon.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::SmallVectorTemplateCommon >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TreeRefSmallVectorCommon extends IntSizedSmallVectorBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TreeRefSmallVectorCommon(Pointer p) { super(p); } + + + // forward iterator creation methods. + public native @ByVal @Cast("c10::SmallVectorTemplateCommon >::iterator*") TreeRef begin(); + public native @ByVal @Cast("c10::SmallVectorTemplateCommon >::iterator*") TreeRef end(); + + // reverse iterator creation methods. 
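+ // Illustrative sketch (not generated code): begin()/end() surface raw iterator
+ // pointers, so Java callers typically index instead, using the accessors
+ // declared just below (`list` is a hypothetical TreeList):
+ //
+ //   for (long i = 0; i < list.size(); i++) {
+ //     Tree node = list.at(i).get();          // at() is bounds-checked, unlike get(i)
+ //   }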
+ + public native long size_in_bytes(); + public native long max_size(); + + public native @Cast("size_t") long capacity_in_bytes(); + + /** Return a pointer to the vector's buffer, even if empty(). */ + public native @ByVal @Cast("c10::SmallVectorTemplateCommon >::pointer*") TreeRef data(); + /** Return a pointer to the vector's buffer, even if empty(). */ + + // SmallVector::at is NOT from LLVM. + public native @ByVal TreeRef at(long idx); + public native @Name("operator []") @ByVal TreeRef get(long idx); + + public native @ByVal TreeRef front(); + + public native @ByVal TreeRef back(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorImpl.java new file mode 100644 index 00000000000..3c54a576b99 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefSmallVectorImpl.java @@ -0,0 +1,71 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("c10::SmallVectorImpl >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TreeRefSmallVectorImpl extends TreeRefSmallVectorBase { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TreeRefSmallVectorImpl(Pointer p) { super(p); } + + + + public native void clear(); + public native void resize(long N); + + /** Like resize, but \ref T is POD, the new values won't be initialized. */ + public native void resize_for_overwrite(long N); + + public native void resize(long N, @ByVal TreeRef NV); + + public native void reserve(long N); + + public native void pop_back_n(long NumItems); + + public native @ByVal TreeRef pop_back_val(); + + public native void swap(@ByRef TreeRefSmallVectorImpl RHS); + + /** Add the specified range to the end of the SmallVector. */ + + /** Append \p NumInputs copies of \p Elt to the end. */ + public native void append(long NumInputs, @ByVal TreeRef Elt); + + public native void append(@Const @ByRef TreeRefSmallVectorImpl RHS); + + public native void assign(long NumElts, @ByVal TreeRef Elt); + + // FIXME: Consider assigning over existing elements, rather than clearing & + // re-initializing them - for all assign(...) variants. 
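+ // Illustrative sketch (not generated code; `trees` and `extra` are hypothetical
+ // TreeList / TreeRef values) of the usual SmallVector mutation surface:
+ //
+ //   trees.reserve(8);                    // grow capacity only; size() is unchanged
+ //   trees.append(2, extra);              // append two copies of `extra`
+ //   trees.resize(1);                     // drop everything past the first element
+ //   TreeRef last = trees.pop_back_val(); // remove and return the last element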
+ + public native void assign(@Const @ByRef TreeRefSmallVectorImpl RHS); + + public native @ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef erase(@ByVal @Cast("c10::SmallVectorImpl >::const_iterator*") TreeRef CI); + + public native @ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef erase(@ByVal @Cast("c10::SmallVectorImpl >::const_iterator*") TreeRef CS, @ByVal @Cast("c10::SmallVectorImpl >::const_iterator*") TreeRef CE); + public native @ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef insert(@ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef I, @ByRef(true) TreeRef Elt); + + public native @ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef insert(@ByVal @Cast("c10::SmallVectorImpl >::iterator*") TreeRef I, long NumToInsert, @ByVal TreeRef Elt); + + public native @ByRef @Name("operator =") TreeRefSmallVectorImpl put(@Const @ByRef TreeRefSmallVectorImpl RHS); + + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef TreeRefSmallVectorImpl RHS); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef TreeRefSmallVectorImpl RHS); + + public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef TreeRefSmallVectorImpl RHS); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefStringMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefStringMap.java new file mode 100644 index 00000000000..977f6ccf719 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeRefStringMap.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::unordered_map") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TreeRefStringMap extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public TreeRefStringMap(Pointer p) { super(p); } + public TreeRefStringMap() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef TreeRefStringMap put(@ByRef TreeRefStringMap x); + + public boolean empty() { return size() == 0; } + public native long size(); + + @Index public native @StdString BytePointer get(@ByRef TreeRef i); + public native TreeRefStringMap put(@ByRef TreeRef i, BytePointer value); + @ValueSetter @Index public native TreeRefStringMap put(@ByRef TreeRef i, @StdString String value); + + public native void erase(@ByVal Iterator pos); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *().first") @MemberGetter @ByRef @Const TreeRef first(); + public native @Name("operator *().second") @MemberGetter @StdString BytePointer second(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeView.java index 75b64a5b3b2..480ba6e7850 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TreeView.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TreeView.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -106,13 +108,15 @@ @Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class TreeView extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public TreeView(Pointer p) { super(p); } - public TreeView(@ByVal @Cast("torch::jit::TreeRef*") Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@ByVal @Cast("torch::jit::TreeRef*") Pointer tree); - public native @ByVal @Cast("torch::jit::TreeRef*") Pointer tree(); + public TreeView(@ByVal TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@ByVal TreeRef tree); + public native @ByVal TreeRef tree(); public native @Const @ByRef SourceRange range(); - public native @ByVal @Cast("torch::jit::TreeRef*") @Name("operator torch::jit::TreeRef") Pointer asPointer(); - public native @Cast("const torch::jit::TreeRef*") @ByRef Pointer get(); + public native @ByVal @Name("operator torch::jit::TreeRef") TreeRef asTreeRef(); + public native int kind(); public native void dump(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLoss.java deleted file mode 100644 index 603417c9c35..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLoss.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code TripletMarginLossImpl}. - * See the documentation for {@code TripletMarginLossImpl} class to learn what - * methods it provides, and examples of how to use {@code TripletMarginLoss} with - * {@code torch::nn::TripletMarginLossOptions}. See the documentation for - * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TripletMarginLoss extends TripletMarginLossImplModuleHolder { - static { Loader.load(); } - - public TripletMarginLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public TripletMarginLoss(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::TripletMarginLossImpl>"}) TripletMarginLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr<torch::nn::TripletMarginLossImpl>"}) TripletMarginLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ - public TripletMarginLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java index 3be85124dd5..7e54999d5b9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -52,9 +54,9 @@ public class TripletMarginLossImpl extends TripletMarginLossImplCloneable { } public TripletMarginLossImpl(@ByVal(nullValue = "torch::nn::TripletMarginLossOptions{}") TripletMarginLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal(nullValue = "torch::nn::TripletMarginLossOptions{}") TripletMarginLossOptions options_); + @SharedPtr private native void allocate(@ByVal(nullValue = "torch::nn::TripletMarginLossOptions{}") TripletMarginLossOptions options_); public TripletMarginLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java index c021f434b02..349062bee6a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class TripletMarginLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TripletMarginLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TripletMarginLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(TripletMarginLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplModuleHolder.java deleted file mode 100644 index 7f932237879..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TripletMarginLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TripletMarginLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public TripletMarginLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public TripletMarginLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") TripletMarginLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") TripletMarginLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. 
*/ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native TripletMarginLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossOptions.java index c1af7bc4b8a..98ce37284aa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -47,5 +49,5 @@ public class TripletMarginLossOptions extends Pointer { public native @ByRef @NoException(true) DoublePointer p(); public native @ByRef @NoException(true) DoublePointer eps(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer swap(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLoss.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLoss.java deleted file mode 100644 index f8760fad923..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLoss.java +++ /dev/null @@ -1,36 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code TripletMarginWithDistanceLossImpl}. - * See the documentation for {@code TripletMarginWithDistanceLossImpl} class to learn - * what methods it provides, and examples of how to use - * {@code TripletMarginWithDistanceLoss} with - * {@code torch::nn::TripletMarginWithDistanceLossOptions}. - * See the documentation for {@code ModuleHolder} to learn about PyTorch's - * module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TripletMarginWithDistanceLoss extends TripletMarginWithDistanceLossImplModuleHolder { - static { Loader.load(); } - - public TripletMarginWithDistanceLoss(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public TripletMarginWithDistanceLoss(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TripletMarginWithDistanceLoss(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java index 1c10d6d4f46..2c8b548cb13 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -55,10 +57,10 @@ public class TripletMarginWithDistanceLossImpl extends TripletMarginWithDistance public TripletMarginWithDistanceLossImpl( @ByVal(nullValue = "torch::nn::TripletMarginWithDistanceLossOptions{}") TripletMarginWithDistanceLossOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate( + @SharedPtr private native void allocate( @ByVal(nullValue = "torch::nn::TripletMarginWithDistanceLossOptions{}") TripletMarginWithDistanceLossOptions options_); public TripletMarginWithDistanceLossImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java index bdb964e2467..d561758eaad 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class TripletMarginWithDistanceLossImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public TripletMarginWithDistanceLossImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr TripletMarginWithDistanceLossImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(TripletMarginWithDistanceLossImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplModuleHolder.java deleted file mode 100644 index 994206d709b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TripletMarginWithDistanceLossImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TripletMarginWithDistanceLossImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public TripletMarginWithDistanceLossImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public TripletMarginWithDistanceLossImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") TripletMarginWithDistanceLossImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") TripletMarginWithDistanceLossImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) TripletMarginWithDistanceLossImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native TripletMarginWithDistanceLossImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossOptions.java index 2bcebd750a5..83d24241b1b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -46,5 +48,5 @@ public class TripletMarginWithDistanceLossOptions extends Pointer { public native @Cast("c10::optional*") @ByRef @NoException(true) Pointer distance_function(); public native @ByRef @NoException(true) DoublePointer margin(); public native @Cast("bool*") @ByRef @NoException(true) BoolPointer swap(); - public native @ByRef @NoException(true) loss_reduction_t reduction(); + public native @ByRef @NoException(true) LossReduction reduction(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tuple.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tuple.java index aea24a3b494..c12ec10b62f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tuple.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tuple.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import 
org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,10 +17,58 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("c10::ivalue") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) + +@Namespace("c10::ivalue") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Tuple extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public Tuple() { super((Pointer)null); } + static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Tuple(Pointer p) { super(p); } + + // named tuples have additional type information, so we + // directly create them tagged + public static native @ByVal TuplePtr createNamed( + @ByVal IValueVector elements_, + @ByVal Type.TypePtr type_); + + public static native @ByVal TuplePtr createNamed( + @ByVal TupleElements elements_, + @SharedPtr TupleType type_); + + // MSVC apparently can't disambiguate the other two overloads of + // create when passed an initializer_list without this. + + public static native @ByVal TuplePtr create(@ByVal IValueVector elements_); + + public static native @ByVal TuplePtr create(@ByVal TupleElements elements_); + + public static native @ByVal TuplePtr create(@ByVal IValueArrayRef elements_); + + public static native @ByVal TuplePtr create(@ByVal IValue e1); + + public static native @ByVal TuplePtr create(@ByVal IValue e1, @ByVal IValue e2); + + public static native @ByVal TuplePtr create(@ByVal IValue e1, @ByVal IValue e2, @ByVal IValue e3); + + // Again, it would be nice to make this noncopyable, but there's a + // lot of extant code that copies Tuples. 
+ // Tuple(const Tuple& rhs) = delete; + + public native @Const @ByRef TupleElements elements(); + + + + public native void setElements(@ByRef(true) IValueVector elements); + + public native void setElements(@ByRef(true) TupleElements elements); + + public native void unsafeSetElement(@Cast("size_t") long idx, @Const @ByRef IValue element); + + public native @Cast("size_t") long size(); + + public static native @Cast("size_t") long hash(@Const @ByRef Tuple t); + + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals( + @Const @ByRef Tuple lhs, + @Const @ByRef Tuple rhs); + public boolean equals(Tuple rhs) { return equals(this, rhs); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleElements.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleElements.java new file mode 100644 index 00000000000..48d7f0f45ae --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleElements.java @@ -0,0 +1,108 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("c10::ivalue") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TupleElements extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TupleElements(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public TupleElements(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public TupleElements position(long position) { + return (TupleElements)super.position(position); + } + @Override public TupleElements getPointer(long i) { + return new TupleElements((Pointer)this).offsetAddress(i); + } + + + public TupleElements() { super((Pointer)null); allocate(); } + private native void allocate(); + + public TupleElements(@ByVal IValueVector elements) { super((Pointer)null); allocate(elements); } + private native void allocate(@ByVal IValueVector elements); + + public TupleElements(@ByVal IValueArrayRef elements) { super((Pointer)null); allocate(elements); } + private native void allocate(@ByVal IValueArrayRef elements); + + public TupleElements(@ByRef(true) IValue e1) { super((Pointer)null); allocate(e1); } + private native void allocate(@ByRef(true) IValue e1); + + public TupleElements(@ByRef(true) IValue e1, @ByRef(true) IValue e2) { super((Pointer)null); allocate(e1, e2); } + private native void allocate(@ByRef(true) IValue e1, @ByRef(true) IValue e2); + + public TupleElements(@ByRef(true) IValue e1, @ByRef(true) IValue e2, @ByRef(true) IValue e3) { super((Pointer)null); allocate(e1, e2, e3); } + private native void allocate(@ByRef(true) IValue e1, @ByRef(true) IValue e2, @ByRef(true) IValue e3); + + // It would be nice to make this noncopyable to prevent people from + // writing code like `auto output = + // forward(...).toTupleRef().elements()` (which does refcount bumps on + // each element, unlike the more efficient but verbose + // ``` + // auto outputIntrusivePtr = forward(...).toTuple(); + // const auto& output = outputIntrusivePtr->elements(); + // ``` + // ), but there is simply an overwhelming amount of code that does + // it the inefficient way. + // See also operator std::vector below. + public TupleElements(@Const @ByRef TupleElements rhs) { super((Pointer)null); allocate(rhs); } + private native void allocate(@Const @ByRef TupleElements rhs); + + public native @ByRef @Name("operator =") TupleElements put(@Const @ByRef TupleElements rhs); + + public native @ByVal IValueArrayRef asArrayRef(); + + // Mimic implicit conversion from std::vector to ArrayRef. + public native @ByVal @Name("operator c10::ArrayRef") IValueArrayRef asIValueArrayRef(); + + public static native @Cast("size_t") long hash(@Const @ByRef TupleElements v); + + public native void setContents(@ByRef(true) IValueVector contents); + + public native @Cast("bool") boolean empty(); + + public native @Cast("size_t") long size(); + + public native @ByRef @Name("operator []") IValue get(@Cast("size_t") long idx); + + public native @ByRef IValue at(@Cast("size_t") long idx); + + public native @Cast("c10::ivalue::TupleElements::iterator") IValue begin(); + + public native @Cast("c10::ivalue::TupleElements::iterator") IValue end(); + + public native @Cast("c10::ivalue::TupleElements::const_iterator") IValue cbegin(); + + public native @Cast("c10::ivalue::TupleElements::const_iterator") IValue cend(); + + public native @ByVal IValueVector vec(); + + public native @ByRef IValue back(); + + + + // More compatibility shims for the overwhelming amount of code that + // likes to copy tuple elements into a vector; see comment above the + // copy constructor. 
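+ // Illustrative sketch (not generated code; `tuple` is a hypothetical Tuple):
+ // prefer borrowing the elements over copying them, per the comment above:
+ //
+ //   TupleElements elems = tuple.elements(); // borrowed; no per-element refcount bumps
+ //   IValue first = elems.get(0);            // operator[] is mapped to get()
+ //   IValueVector copied = elems.vec();      // explicit copy when a vector is required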
+ public native @ByVal @Name("operator std::vector") IValueVector asIValueVector(); + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleLiteral.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleLiteral.java index 4ce9e83b0aa..4b41c527a6f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleLiteral.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleLiteral.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,7 +21,13 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class TupleLiteral extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TupleLiteral(Pointer p) { super(p); } - public TupleLiteral(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public TupleLiteral(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal ExprList inputs(); + public static native @ByVal TupleLiteral create( + @Const @ByRef SourceRange range, + @Const @ByRef ExprList inputs); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TuplePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TuplePtr.java new file mode 100644 index 00000000000..6e9e2c23fef --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TuplePtr.java @@ -0,0 +1,150 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + // namespace detail + +@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TuplePtr extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TuplePtr(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public TuplePtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public TuplePtr position(long position) { + return (TuplePtr)super.position(position); + } + @Override public TuplePtr getPointer(long i) { + return new TuplePtr((Pointer)this).offsetAddress(i); + } + + + public TuplePtr() { super((Pointer)null); allocate(); } + @NoException(true) private native void allocate(); + + public TuplePtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } + @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); + + // This constructor will not increase the ref counter for you. + // We use the tagged dispatch mechanism to explicitly mark this constructor + // to not increase the refcount + public TuplePtr(Tuple target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); } + @NoException(true) private native void allocate(Tuple target, @ByVal DontIncreaseRefcount arg1); + + + + public TuplePtr(@ByRef(true) TuplePtr rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) TuplePtr rhs); + + public native @ByRef @Name("operator =") @NoException(true) TuplePtr put(@ByRef(true) TuplePtr rhs); + + public native @NoException(true) Tuple get(); + + public native @ByRef @Name("operator *") @NoException(true) Tuple multiply(); + + public native @Name("operator ->") @NoException(true) Tuple access(); + + public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef TuplePtr rhs); + + // We do a lot of null-pointer checks in our code, good to have this be cheap. + public native @Cast("bool") @NoException(true) boolean defined(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean unique(); + + /** + * Returns an owning (!) pointer to the underlying object and makes the + * intrusive_ptr instance invalid. That means the refcount is not decreased. + * You *must* put the returned pointer back into a intrusive_ptr using + * intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) Tuple release(); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes + * over ownership. That means the refcount is not increased. + * This is the counter-part to intrusive_ptr::release() and the pointer + * passed in *must* have been created using intrusive_ptr::release(). + */ + public static native @ByVal TuplePtr reclaim(Tuple owning_ptr); + + /** + * Takes an owning pointer to TTarget* and creates an intrusive_ptr + * representing a new reference, i.e. the raw pointer retains + * ownership. + */ + public static native @ByVal TuplePtr reclaim_copy(Tuple owning_ptr); + + /** + * Allocate a heap object with args and wrap it inside a intrusive_ptr and + * incref. This is a helper function to let make_intrusive() access private + * intrusive_ptr constructors. + */ + + /** + * Turn a new instance of TTarget (e.g., literally allocated + * using new TTarget(...) into an intrusive_ptr. If possible, + * use intrusive_ptr::make instead which statically guarantees + * that the allocation was done properly. 
+ * + * At the moment, the only reason this method exists is because + * pybind11 holder types expect to be able to allocate in + * this way (because pybind11 handles the new allocation itself). + */ + public static native @ByVal TuplePtr unsafe_steal_from_new(Tuple raw_ptr); + + /** + * Turn an instance of TTarget that should not be reference counted + * (e.g., allocated into an arena with placement new) into an + * intrusive_ptr. This is gratuitously unsafe and should only be + * used if you can guarantee that the pointer will not escape and be + * refcounted as normal. + * + * {@code expected_decrefs} is a debugging parameter: it indicates the + * number of strong owners the intrusive_ptr_target in question is + * expected to get. In most use cases, this will likely be 1. + * + * The reason this method exists is for manually sharing + * StorageImpls across Tensors in the static runtime. It needs + * access to private intrusive_ptr members so that the refcounts can + * be initialized to custom values. + */ + public static native @ByVal TuplePtr unsafe_adapt_non_heap_allocated( + Tuple raw_ptr, + @Cast("size_t") long expected_decrefs); + + /** + * Turn a **non-owning raw pointer** to an intrusive_ptr. It is + * the moral equivalent of enable_shared_from_this on a shared pointer. + * + * This method is only valid for objects that are already live. If + * you are looking for the moral equivalent of unique_ptr(T*) + * constructor, see steal_from_new. + * + * TODO: https://github.com/pytorch/pytorch/issues/56482 + */ + public static native @ByVal TuplePtr unsafe_reclaim_from_nonowning(Tuple raw_ptr); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java index 94305fb45e3..d831ce68bed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TupleType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java index 6d0c997399a..2f378a7991e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Type.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -22,7 +24,8 @@ public class Type extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public Type(Pointer p) { super(p); } - + private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Type lhs, @Const @ByRef Type rhs); + public boolean equals(Type rhs) { return equals(this, rhs); } @Name("SingletonOrSharedTypePtr") public static class TypePtr extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -41,13 +44,14 @@ public class Type extends Pointer { public TypePtr() { super((Pointer)null); allocate(); } private native void allocate(); - /* implicit */ public TypePtr(@SharedPtr @ByVal Type x) { super((Pointer)null); allocate(x); } -private native void allocate(@SharedPtr @ByVal Type x); + /* implicit */ public TypePtr(@SharedPtr Type x) { super((Pointer)null); allocate(x); } +private native void allocate(@SharedPtr Type x); /* implicit */ public TypePtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - /* implicit */ + /* implicit */ public TypePtr(@ByVal SingletonTypePtr p) { super((Pointer)null); allocate(p); } +private native void allocate(@ByVal SingletonTypePtr p); // We need to support construction from T* for pybind. The problem @@ -71,9 +75,9 @@ public class Type extends Pointer { public native @Cast("bool") @Name("operator bool") boolean asBoolean(); - + public native @Cast("bool") @Name("operator ==") boolean equals(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - + public native @Cast("bool") @Name("operator !=") boolean notEquals(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public native @Name("operator ->") Type access(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeArrayRef.java index 32053b5a304..59140d35d33 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -39,8 +41,7 @@ public class TypeArrayRef extends Pointer { /** Construct an ArrayRef from a single element. */ // TODO Make this explicit - public TypeArrayRef(@Const @ByRef Type.TypePtr OneElt) { super((Pointer)null); allocate(OneElt); } - private native void allocate(@Const @ByRef Type.TypePtr OneElt); + /** Construct an ArrayRef from a pointer and length. */ public TypeArrayRef(@Const Type.TypePtr data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } @@ -58,6 +59,8 @@ public class TypeArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector, because ArrayRef can't work on a std::vector // bitfield. 
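+ // A hedged sketch of the new vector-backed view below (assumes the JavaCPP
+ // TypeVector default constructor and that the vector outlives the view):
+ //   TypeVector vec = new TypeVector();
+ //   TypeArrayRef view = new TypeArrayRef(vec); // borrows vec's storage, no copy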
+ public TypeArrayRef(@ByRef TypeVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef TypeVector vec); /** Construct an ArrayRef from a std::array */ @@ -70,13 +73,13 @@ public class TypeArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Type begin(); - public native @ByVal @Cast("const c10::ArrayRef::iterator*") Type end(); + public native @Const Type.TypePtr begin(); + public native @Const Type.TypePtr end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Type cbegin(); - public native @ByVal @Cast("const c10::ArrayRef::const_iterator*") Type cend(); + public native @Const Type.TypePtr cbegin(); + public native @Const Type.TypePtr cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeEnv.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeEnv.java index 927e038dc8e..22401d2b02f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeEnv.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeEnv.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeError.java index d61120ddffb..62924d6883d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeError.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifier.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifier.java index 5ee3a3a65e4..7de5356ed14 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifier.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifier.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -23,13 +25,15 @@ * dtype of tensors. 
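 * <p>A hedged sketch of the operator bindings added in this hunk (the wrapper
 * names are from the generated class; the ids and stream are illustrative):
 * <pre>{@code
 * if (idA.lessThan(idB)) { // bound native operator<
 *   idA.shiftLeft(out);    // bound native operator<<, prints the id
 * }
 * }</pre>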
*/ @Namespace("caffe2") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TypeIdentifier extends TypeIdentifierIdWrapper { +public class TypeIdentifier extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public TypeIdentifier(Pointer p) { super(p); } - - + private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @ByVal TypeIdentifier typeId); + public Pointer shiftLeft(Pointer stream) { return shiftLeft(stream, this); } + private static native @Namespace @Cast("const bool") @Name("operator <") boolean lessThan(@ByVal TypeIdentifier lhs, @ByVal TypeIdentifier rhs); + public boolean lessThan(TypeIdentifier rhs) { return lessThan(this, rhs); } /** * Returns the unique id for the given type T. The id is unique for the type T diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifierIdWrapper.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifierIdWrapper.java deleted file mode 100644 index b826a59f34c..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeIdentifierIdWrapper.java +++ /dev/null @@ -1,41 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** - * This template simplifies generation of simple classes that wrap an id - * in a typesafe way. Namely, you can use it to create a very lightweight - * type that only offers equality comparators and hashing. Example: - * - * struct MyIdType final : IdWrapper { - * constexpr explicit MyIdType(uint32_t id): IdWrapper(id) {} - * }; - * - * Then in the global top level namespace: - * - * C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType); - * - * That's it - equality operators and hash functions are automatically defined - * for you, given the underlying type supports it. - */ -@Name("c10::IdWrapper") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TypeIdentifierIdWrapper extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public TypeIdentifierIdWrapper(Pointer p) { super(p); } - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java index 928891a6e8e..520b4a4bd34 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMeta.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -94,7 +96,8 @@ public class TypeMeta extends Pointer { */ public native @ByVal @Cast("c10::string_view*") @NoException(true) Pointer name(); - + private static native @Namespace @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByVal TypeMeta lhs, @Const @ByVal TypeMeta rhs); + public boolean equals(TypeMeta rhs) { return equals(this, rhs); } // Below are static functions that can be called by passing a specific type. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java index 41b15fdba40..6fd512a0277 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaData.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaOptional.java index 4f04e83f6d7..4ff15cef1ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeMetaOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class TypeMetaOptional extends Pointer { public native @Name("operator =") @ByRef TypeMetaOptional put(@ByRef TypeMetaOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef TypeMeta get(); @ValueSetter public native TypeMetaOptional put(@ByRef TypeMeta value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeNameUniquer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeNameUniquer.java deleted file mode 100644 index 37ee9e2f7b5..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeNameUniquer.java +++ /dev/null @@ -1,47 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS 
FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** - * class TypeNameUniquer - * - * Generates a unique name for every type {@code t} passed in. Types that compare - * equal with EqualType will receive the same unique name. - * - * This is used during Module::save(), to resolve type name collisions during - * serialization. - */ -@Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TypeNameUniquer extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public TypeNameUniquer() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public TypeNameUniquer(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TypeNameUniquer(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public TypeNameUniquer position(long position) { - return (TypeNameUniquer)super.position(position); - } - @Override public TypeNameUniquer getPointer(long i) { - return new TypeNameUniquer((Pointer)this).offsetAddress(i); - } - - public native @ByVal QualifiedName getUniqueName(@SharedPtr @Cast({"", "", "std::shared_ptr&&"}) NamedType t); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeParser.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeParser.java deleted file mode 100644 index 79cbcfee891..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeParser.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** {@code reader} is a function that takes in a size to read from some pickled - * binary. {@code reader} should remember where it last read, and return - * the number of bytes read. - * See {@code torch::pickle} for details. - * type_resolver is used to resolve any JIT type based on type str */ -@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class TypeParser extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public TypeParser(Pointer p) { super(p); } - protected TypeParser() { allocate(); } - private native void allocate(); - public native @ByVal Type.TypePtr call(@StdString BytePointer arg0); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypePtrOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypePtrOptional.java index 918fc7b785c..a0bc363ac90 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypePtrOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypePtrOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class TypePtrOptional extends Pointer { public native @Name("operator =") @ByRef TypePtrOptional put(@ByRef TypePtrOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") @ByRef Type.TypePtr get(); @ValueSetter public native TypePtrOptional put(@ByRef Type.TypePtr value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeVector.java index 205f4d4aafd..a423d1de177 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TypeVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TypeVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class TypeVector extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public Type.TypePtr front() { return get(0); } + public Type.TypePtr back() { return get(size() - 1); } @Index(function = "at") public native @ByRef Type.TypePtr get(@Cast("size_t") long i); public native TypeVector put(@Cast("size_t") long i, Type.TypePtr value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnaryOp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnaryOp.java index 27d7dd8bfa1..43f604e72bb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnaryOp.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnaryOp.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,8 +21,10 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class UnaryOp extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
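+ * <p>A hedged sketch of the retargeted factory below (the range, kind, and
+ * operand are assumed to be already in hand):
+ * <pre>{@code
+ * UnaryOp neg = UnaryOp.create(range, kind, operand); // kind is a tree-kind int
+ * }</pre>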
*/ + public UnaryOp(Pointer p) { super(p); } - public UnaryOp(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public UnaryOp(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public static native @ByVal UnaryOp create(@Const @ByRef SourceRange range, int kind, @Const @ByRef Expr expr); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java index c3bf2278084..6ee6f5dbd51 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Unflatten.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Unflatten.java deleted file mode 100644 index 2ce63466b7b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Unflatten.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code UnflattenImpl}. - * See the documentation for {@code UnflattenImpl} class to learn what methods it - * provides, and examples of how to use {@code Unflatten} with - * {@code torch::nn::UnflattenOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Unflatten extends UnflattenImplModuleHolder { - static { Loader.load(); } - - public Unflatten(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Unflatten(@SharedPtr @Cast({"", "std::shared_ptr"}) UnflattenImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UnflattenImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Unflatten(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java index 8570b131713..faa29468c18 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -38,13 +40,13 @@ public class UnflattenImpl extends UnflattenImplCloneable { public UnflattenImpl(Pointer p) { super(p); } public UnflattenImpl(@Cast("int64_t") long dim, @ByVal @Cast("std::vector*") LongVector sizes) { super((Pointer)null); allocate(dim, sizes); } - @NoDeallocator private native void allocate(@Cast("int64_t") long dim, @ByVal @Cast("std::vector*") LongVector sizes); + @SharedPtr private native void allocate(@Cast("int64_t") long dim, @ByVal @Cast("std::vector*") LongVector sizes); public UnflattenImpl(@StdString BytePointer dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape) { super((Pointer)null); allocate(dimname, namedshape); } - @NoDeallocator private native void allocate(@StdString BytePointer dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape); + @SharedPtr private native void allocate(@StdString BytePointer dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape); public UnflattenImpl(@StdString String dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape) { super((Pointer)null); allocate(dimname, namedshape); } - @NoDeallocator private native void allocate(@StdString String dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape); + @SharedPtr private native void allocate(@StdString String dimname, @ByVal @Cast("torch::nn::UnflattenOptions::namedshape_t*") StringLongVector namedshape); public UnflattenImpl(@ByVal UnflattenOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@ByVal UnflattenOptions options_); + @SharedPtr private native void allocate(@ByVal UnflattenOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java index de2d15e5094..a3f7407af7b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class UnflattenImplCloneable extends 
Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UnflattenImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr UnflattenImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(UnflattenImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplModuleHolder.java deleted file mode 100644 index 09d81eb6a88..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class UnflattenImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public UnflattenImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public UnflattenImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public UnflattenImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) UnflattenImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UnflattenImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") UnflattenImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") UnflattenImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) UnflattenImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native UnflattenImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenOptions.java index c388ebcd903..0f275e0cd32 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Unfold.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Unfold.java deleted file mode 100644 index 2b40895b40b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Unfold.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code UnfoldImpl}. - * See the documentation for {@code UnfoldImpl} class to learn what methods it - * provides, and examples of how to use {@code Unfold} with - * {@code torch::nn::UnfoldOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. 
*/ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Unfold extends UnfoldImplModuleHolder { - static { Loader.load(); } - - public Unfold(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Unfold(@SharedPtr @Cast({"", "std::shared_ptr"}) UnfoldImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UnfoldImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Unfold(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java index fa69d77ce2a..a12e85dc614 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class UnfoldImpl extends UnfoldImplCloneable { public UnfoldImpl(Pointer p) { super(p); } public UnfoldImpl(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size) { super((Pointer)null); allocate(kernel_size); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); + @SharedPtr private native void allocate(@ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size); public UnfoldImpl(@Const @ByRef UnfoldOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef UnfoldOptions options_); + @SharedPtr private native void allocate(@Const @ByRef UnfoldOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java index 7a36263fdb9..05662e463ca 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class UnfoldImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public UnfoldImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr UnfoldImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(UnfoldImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. */ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplModuleHolder.java deleted file mode 100644 index a4f9d0ee57b..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class UnfoldImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public UnfoldImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public UnfoldImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. 
*/ - /* implicit */ public UnfoldImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) UnfoldImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UnfoldImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") UnfoldImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") UnfoldImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) UnfoldImpl ptr(); - - /** Returns a pointer to the underlying module. */ - public native UnfoldImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldOptions.java index c0fdb587207..0bee8688b53 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnionType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnionType.java index 3e8c3ca1c1f..fece0afd55f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnionType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnionType.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java new file mode 100644 index 00000000000..50e7b49129f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UniqueVoidPtr.java @@ -0,0 +1,84 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import 
org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// A detail::UniqueVoidPtr is an owning smart pointer like unique_ptr, but +// with three major differences: +// +// 1) It is specialized to void +// +// 2) It is specialized for a function pointer deleter +// void(void* ctx); i.e., the deleter doesn't take a +// reference to the data, just to a context pointer +// (erased as void*). In fact, internally, this pointer +// is implemented as having an owning reference to +// context, and a non-owning reference to data; this is why +// you release_context(), not release() (the conventional +// API for release() wouldn't give you enough information +// to properly dispose of the object later.) +// +// 3) The deleter is guaranteed to be called when the unique +// pointer is destructed and the context is non-null; this is different +// from std::unique_ptr where the deleter is not called if the +// data pointer is null. +// +// Some of the methods have slightly different types than std::unique_ptr +// to reflect this. +// +@Namespace("c10::detail") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class UniqueVoidPtr extends Pointer { + static { Loader.load(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public UniqueVoidPtr(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public UniqueVoidPtr position(long position) { + return (UniqueVoidPtr)super.position(position); + } + @Override public UniqueVoidPtr getPointer(long i) { + return new UniqueVoidPtr((Pointer)this).offsetAddress(i); + } + + public UniqueVoidPtr() { super((Pointer)null); allocate(); } + private native void allocate(); + public UniqueVoidPtr(Pointer data) { super((Pointer)null); allocate(data); } + private native void allocate(Pointer data); + public UniqueVoidPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") PointerConsumer ctx_deleter) { super((Pointer)null); allocate(data, ctx, ctx_deleter); } + private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") PointerConsumer ctx_deleter); + public UniqueVoidPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Pointer ctx_deleter) { super((Pointer)null); allocate(data, ctx, ctx_deleter); } + private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") Pointer ctx_deleter); + public UniqueVoidPtr(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") long ctx_deleter) { super((Pointer)null); allocate(data, ctx, ctx_deleter); } + private native void allocate(Pointer data, Pointer ctx, @Cast("c10::DeleterFnPtr") long ctx_deleter); + public native @Name("operator ->") Pointer access(); + public native void clear(); + public native Pointer get(); + public native Pointer get_context(); + public native Pointer release_context(); + + public native @Cast("bool") boolean compare_exchange_deleter( + @Cast("c10::DeleterFnPtr") PointerConsumer expected_deleter, + @Cast("c10::DeleterFnPtr") PointerConsumer new_deleter); + public native @Cast("bool") boolean compare_exchange_deleter( + @Cast("c10::DeleterFnPtr") Pointer expected_deleter, + 
@Cast("c10::DeleterFnPtr") Pointer new_deleter); + public native @Cast("bool") boolean compare_exchange_deleter( + @Cast("c10::DeleterFnPtr") long expected_deleter, + @Cast("c10::DeleterFnPtr") long new_deleter); + public native @Cast("bool") @Name("operator bool") boolean asBoolean(); + public native @Cast("c10::DeleterFnPtr") PointerConsumer get_deleter(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java index 3105e36156e..43b0a83098e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Upsample.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Upsample.java deleted file mode 100644 index 69b50987b15..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Upsample.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code UpsampleImpl}. - * See the documentation for {@code UpsampleImpl} class to learn what methods it - * provides, and examples of how to use {@code Upsample} with - * {@code torch::nn::UpsampleOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Upsample extends UpsampleImplModuleHolder { - static { Loader.load(); } - - public Upsample(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public Upsample(@SharedPtr @Cast({"", "std::shared_ptr"}) UpsampleImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UpsampleImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Upsample(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java index d07879d571e..69a6ad15fcf 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -47,9 +49,9 @@ public class UpsampleImpl extends UpsampleImplCloneable { } public UpsampleImpl(@Const @ByRef(nullValue = "torch::nn::UpsampleOptions{}") UpsampleOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef(nullValue = "torch::nn::UpsampleOptions{}") UpsampleOptions options_); + @SharedPtr private native void allocate(@Const @ByRef(nullValue = "torch::nn::UpsampleOptions{}") UpsampleOptions options_); public UpsampleImpl() { super((Pointer)null); allocate(); } - @NoDeallocator private native void allocate(); + @SharedPtr private native void allocate(); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java index 6814f391eeb..2db43f0538e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class UpsampleImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public UpsampleImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr UpsampleImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(UpsampleImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
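 * <p>A hedged Java-side sketch (assumes an already-constructed
 * {@code UpsampleImpl} instance named {@code upsample}):
 * <pre>{@code
 * Module copy = upsample.clone();        // deep copy on the current device
 * Module moved = upsample.clone(device); // deep copy onto a given DeviceOptional
 * }</pre>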
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplModuleHolder.java deleted file mode 100644 index 0450ed310c6..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class UpsampleImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public UpsampleImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public UpsampleImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public UpsampleImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) UpsampleImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) UpsampleImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") UpsampleImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") UpsampleImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) UpsampleImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native UpsampleImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/upsample_mode_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleMode.java similarity index 53% rename from pytorch/src/gen/java/org/bytedeco/pytorch/upsample_mode_t.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleMode.java index a8f8d2b5157..f89424df7d9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/upsample_mode_t.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleMode.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -16,33 +18,33 @@ import static org.bytedeco.pytorch.global.torch.*; @NoOffset @Name("c10::variant<torch::enumtype::kNearest,torch::enumtype::kLinear,torch::enumtype::kBilinear,torch::enumtype::kBicubic,torch::enumtype::kTrilinear>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class upsample_mode_t extends Pointer { +public class UpsampleMode extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ - public upsample_mode_t(Pointer p) { super(p); } - public upsample_mode_t(kNearest value) { this(); put(value); } - public upsample_mode_t(kLinear value) { this(); put(value); } - public upsample_mode_t(kBilinear value) { this(); put(value); } - public upsample_mode_t(kBicubic value) { this(); put(value); } - public upsample_mode_t(kTrilinear value) { this(); put(value); } - public upsample_mode_t() { allocate(); } + public UpsampleMode(Pointer p) { super(p); } + public UpsampleMode(kNearest value) { this(); put(value); } + public UpsampleMode(kLinear value) { this(); put(value); } + public UpsampleMode(kBilinear value) { this(); put(value); } + public UpsampleMode(kBicubic value) { this(); put(value); } + public UpsampleMode(kTrilinear value) { this(); put(value); } + public UpsampleMode() { allocate(); } private native void allocate(); - public native @Name("operator =") @ByRef upsample_mode_t put(@ByRef upsample_mode_t x); + public native @Name("operator =") @ByRef UpsampleMode put(@ByRef UpsampleMode x); public @ByRef kNearest get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kNearest get0(@ByRef upsample_mode_t container); - @ValueSetter public native upsample_mode_t put(@ByRef kNearest value); + @Namespace @Name("c10::get<0>") public static native @ByRef kNearest get0(@ByRef UpsampleMode container); + @ValueSetter public native UpsampleMode put(@ByRef kNearest value); public @ByRef kLinear get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kLinear get1(@ByRef upsample_mode_t container); - @ValueSetter public native upsample_mode_t put(@ByRef kLinear value); + @Namespace @Name("c10::get<1>") public static native @ByRef kLinear get1(@ByRef UpsampleMode container); + @ValueSetter public native UpsampleMode put(@ByRef kLinear value); public @ByRef kBilinear get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @ByRef kBilinear get2(@ByRef upsample_mode_t container); - @ValueSetter public native upsample_mode_t put(@ByRef kBilinear value); + @Namespace @Name("c10::get<2>") public static native @ByRef kBilinear get2(@ByRef UpsampleMode container); + @ValueSetter public native UpsampleMode put(@ByRef kBilinear value); public @ByRef kBicubic get3() { return get3(this); } - @Namespace @Name("c10::get<3>") public static native @ByRef kBicubic get3(@ByRef upsample_mode_t container); - @ValueSetter public native upsample_mode_t put(@ByRef kBicubic value); + @Namespace @Name("c10::get<3>") public static native @ByRef kBicubic get3(@ByRef UpsampleMode container); + @ValueSetter public native UpsampleMode put(@ByRef kBicubic value); public @ByRef kTrilinear get4() { return get4(this); } - @Namespace @Name("c10::get<4>") public static native @ByRef kTrilinear get4(@ByRef upsample_mode_t container); - @ValueSetter public native upsample_mode_t put(@ByRef kTrilinear value); + @Namespace @Name("c10::get<4>") public static native @ByRef kTrilinear get4(@ByRef UpsampleMode container); + @ValueSetter public native UpsampleMode put(@ByRef kTrilinear value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleOptions.java index cc0721a2c1c..6867f0bc39e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -43,6 +45,6 @@ public class UpsampleOptions extends Pointer { public native @ByRef @NoException(true) LongVectorOptional size(); public native @ByRef @NoException(true) DoubleVectorOptional scale_factor(); - public native @ByRef @NoException(true) upsample_mode_t mode(); + public native @ByRef @NoException(true) UpsampleMode mode(); public native @ByRef @NoException(true) BoolOptional align_corners(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Use.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Use.java index e51d56857d4..5310814d2e4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Use.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Use.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/V_JitNode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/V_JitNode.java deleted file mode 100644 index 1ea255e1956..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/V_JitNode.java +++ /dev/null @@ -1,26 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class V_JitNode extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
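With upsample_mode_t renamed to UpsampleMode, option code reads as in the sketch below; it assumes UpsampleOptions and the torch::enumtype singleton structs (kBilinear and friends) are default-constructible in these bindings, which this hunk does not show:

UpsampleOptions opts = new UpsampleOptions();   // assumed no-arg constructor
opts.mode().put(new kBilinear());               // @ValueSetter from the variant class above
kBilinear mode = opts.mode().get2();            // get2() maps c10::get<2> of the variant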
*/ - public V_JitNode(Pointer p) { super(p); } - protected V_JitNode() { allocate(); } - private native void allocate(); - public native void call(JitNode arg0); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Value.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Value.java index fbb744921cc..4ec03ffd091 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Value.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Value.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueArrayRef.java index fdc58d815c4..702fdf21fd2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueArrayRef.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueArrayRef.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,19 +22,38 @@ public class ValueArrayRef extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ValueArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ValueArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public ValueArrayRef position(long position) { + return (ValueArrayRef)super.position(position); + } + @Override public ValueArrayRef getPointer(long i) { + return new ValueArrayRef((Pointer)this).offsetAddress(i); + } /** \name Constructors * \{

* Construct an empty ArrayRef. */ - /* implicit */ + /* implicit */ public ValueArrayRef() { super((Pointer)null); allocate(); } +private native void allocate(); /** Construct an ArrayRef from a single element. */ // TODO Make this explicit + /** Construct an ArrayRef from a pointer and length. */ + public ValueArrayRef(@Cast("torch::jit::Value**") PointerPointer data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Cast("torch::jit::Value**") PointerPointer data, @Cast("size_t") long length); + public ValueArrayRef(@ByPtrPtr Value data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@ByPtrPtr Value data, @Cast("size_t") long length); /** Construct an ArrayRef from a range. */ + public ValueArrayRef(@Cast("torch::jit::Value**") PointerPointer begin, @Cast("torch::jit::Value**") PointerPointer end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Cast("torch::jit::Value**") PointerPointer begin, @Cast("torch::jit::Value**") PointerPointer end); + public ValueArrayRef(@ByPtrPtr Value begin, @ByPtrPtr Value end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@ByPtrPtr Value begin, @ByPtrPtr Value end); /** Construct an ArrayRef from a SmallVector. This is templated in order to * avoid instantiating SmallVectorTemplateCommon<T> whenever we @@ -42,6 +63,8 @@ public class ValueArrayRef extends Pointer { // The enable_if stuff here makes sure that this isn't used for // std::vector<bool>, because ArrayRef can't work on a std::vector<bool> // bitfield. + public ValueArrayRef(@ByRef ValueVector vec) { super((Pointer)null); allocate(vec); } + private native void allocate(@ByRef ValueVector vec); /** Construct an ArrayRef from a std::array */ @@ -54,18 +77,18 @@ public class ValueArrayRef extends Pointer { * \name Simple Operations * \{ */ - public native @ByVal @Cast("const c10::ArrayRef<torch::jit::Value*>::iterator*") Value begin(); - public native @ByVal @Cast("const c10::ArrayRef<torch::jit::Value*>::iterator*") Value end(); + public native @Const @ByPtr Value begin(); + public native @Const @ByPtr Value end(); // These are actually the same as iterator, since ArrayRef only // gives you const iterators. - public native @ByVal @Cast("const c10::ArrayRef<torch::jit::Value*>::const_iterator*") Value cbegin(); - public native @ByVal @Cast("const c10::ArrayRef<torch::jit::Value*>::const_iterator*") Value cend(); + public native @Const @ByPtr Value cbegin(); + public native @Const @ByPtr Value cend(); /** empty - Check if the array is empty. */ public native @Cast("const bool") boolean empty(); - public native @Cast("const torch::jit::Value**") PointerPointer data(); + public native @Cast("torch::jit::Value**") PointerPointer data(); /** size - Get the array size.
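The new ValueArrayRef(ValueVector) constructor wraps an existing vector without copying. A short sketch using only APIs from this hunk, assuming ValueVector has the usual no-argument constructor of these std::vector wrappers:

import org.bytedeco.pytorch.*;

public class ArrayRefSketch {
    public static void main(String[] args) {
        ValueVector vec = new ValueVector();        // assumed no-arg ctor
        ValueArrayRef ref = new ValueArrayRef(vec); // non-owning view over the vector's storage
        System.out.println(ref.empty());            // true: nothing was added
    }
}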
*/ public native @Cast("const size_t") long size(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueError.java index 2813559a7f0..5490e53390e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueError.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueError.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueOptional.java index 271267fd097..683944665b2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueOptional.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueOptional.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -26,6 +28,7 @@ public class ValueOptional extends Pointer { public native @Name("operator =") @ByRef ValueOptional put(@ByRef ValueOptional x); public native boolean has_value(); + public native void reset(); public native @Name("value") Value get(); @ValueSetter public native ValueOptional put(Value value); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueValueMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueValueMap.java index 828df1710c6..0b92921e933 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueValueMap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueValueMap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueVector.java index 18f9fa4be90..6a289d368b8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueVector.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueVector.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -33,6 +35,8 @@ public class ValueVector extends Pointer { public void 
clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public Value front() { return get(0); } + public Value back() { return get(size() - 1); } @Index(function = "at") public native Value get(@Cast("size_t") long i); public native ValueVector put(@Cast("size_t") long i, Value value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java index e28a7a4abff..20c5ac4cdbe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ValueWrap.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Var.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Var.java index 5f65c194b0d..f8abbaa677f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Var.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Var.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class Var extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public Var(Pointer p) { super(p); } - public Var(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public Var(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Ident name(); public static native @ByVal Var create(@Const @ByRef SourceRange range, @Const @ByRef Ident name); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VarMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VarMaybe.java index b2bfd8cf5d5..a8ce009cd85 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/VarMaybe.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VarMaybe.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,9 +21,11 @@ @Name("torch::jit::Maybe") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class VarMaybe extends TreeView { static { Loader.load(); } + /** Pointer cast constructor. 
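The front()/back() helpers added to ValueVector above are plain sugar over get(). An illustrative fragment (the slots below hold null Value pointers, which is fine as long as they are not dereferenced):

ValueVector vec = new ValueVector();   // assumed no-arg ctor, as elsewhere in these presets
vec.resize(2);                         // two null slots, enough to exercise the accessors
Value first = vec.front();             // equivalent to get(0)
Value last = vec.back();               // equivalent to get(size() - 1)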
Invokes {@link Pointer#Pointer(Pointer)}. */ + public VarMaybe(Pointer p) { super(p); } - public VarMaybe(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public VarMaybe(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); /* implicit */ public VarMaybe(@Const @ByRef Var tree) { super((Pointer)null); allocate(tree); } private native void allocate(@Const @ByRef Var tree); public native @Cast("bool") boolean present(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VarType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VarType.java deleted file mode 100644 index 5db1ec542f4..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/VarType.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -// This type represents a type variable, used in FunctionSchema -@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class VarType extends SharedType { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public VarType(Pointer p) { super(p); } - - public static native @SharedPtr VarType create(@StdString BytePointer name_); - public static native @SharedPtr VarType create(@StdString String name_); - public native @Cast("bool") boolean equals(@Const @ByRef Type rhs); - public native @StdString BytePointer str(); - public native @StdString BytePointer name(); - public native @Cast("bool") boolean hasFreeVariables(); - @MemberGetter public static native TypeKind Kind(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java index 62c8ac34af0..9267c72101e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableInfo.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java index 0297f303d1c..12a0a2cc194 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/VariableVersion.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; 
import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -57,7 +59,7 @@ // can introduce race conditions when we are running the forward pass in // multi-thread scenarios, thus making the forward pass not thread-safe anymore, // which breaks the invariant. -@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class VariableVersion extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WarnAlways.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WarnAlways.java index e966f3b2d0c..4d0ee9c0f88 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WarnAlways.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WarnAlways.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Warning.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Warning.java index 2e7a5a90895..83efee909a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Warning.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Warning.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,39 +38,39 @@ public class Warning extends Pointer { } public Warning( - @ByVal @Cast("c10::Warning::warning_variant_t*") NonlinearityType type, + @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type, @Const @ByRef SourceLocation source_location, @StdString BytePointer msg, @Cast("bool") boolean verbatim) { super((Pointer)null); allocate(type, source_location, msg, verbatim); } private native void allocate( - @ByVal @Cast("c10::Warning::warning_variant_t*") NonlinearityType type, + @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type, @Const @ByRef SourceLocation source_location, @StdString BytePointer msg, @Cast("bool") boolean verbatim); public Warning( - @ByVal @Cast("c10::Warning::warning_variant_t*") NonlinearityType type, + @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type, @Const @ByRef SourceLocation source_location, @StdString String msg, @Cast("bool") boolean verbatim) { super((Pointer)null); allocate(type, source_location, msg, verbatim); } private native void allocate( - @ByVal @Cast("c10::Warning::warning_variant_t*") NonlinearityType type, + @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type, @Const @ByRef SourceLocation source_location, 
@StdString String msg, @Cast("bool") boolean verbatim); public Warning( - @ByVal @Cast("c10::Warning::warning_variant_t*") NonlinearityType type, + @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type, @ByVal SourceLocation source_location, @ByVal CompileTimeEmptyString msg, @Cast("bool") boolean verbatim) { super((Pointer)null); allocate(type, source_location, msg, verbatim); } private native void allocate( - @ByVal @Cast("c10::Warning::warning_variant_t*") NonlinearityType type, + @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type, @ByVal SourceLocation source_location, @ByVal CompileTimeEmptyString msg, @Cast("bool") boolean verbatim); // Getters for members - public native @ByVal @Cast("c10::Warning::warning_variant_t*") NonlinearityType type(); + public native @ByVal @Cast("c10::Warning::warning_variant_t*") Nonlinearity type(); public native @Const @ByRef SourceLocation source_location(); public native @StdString BytePointer msg(); public native @Cast("bool") boolean verbatim(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandler.java index eb70cdd0862..bd844375b11 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandler.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandlerGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandlerGuard.java index 396b671aced..960905a295b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandlerGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WarningHandlerGuard.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakIValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakIValue.java index c278abd5cdf..cfb283ffa36 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakIValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakIValue.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongCompilationUnit.java 
b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongCompilationUnit.java index c437198cbe1..065aa230f64 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongCompilationUnit.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongCompilationUnit.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongTypePtr.java index 4b24dae781e..4c582f7758f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakOrStrongTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java new file mode 100644 index 00000000000..70f785f79e7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorage.java @@ -0,0 +1,104 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +// To allow intrusive_ptr inside std::map or std::set, we need operator< + +@Name("c10::weak_intrusive_ptr<c10::StorageImpl>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class WeakStorage extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ + public WeakStorage(Pointer p) { super(p); } + + + public WeakStorage(@Const @ByRef StorageImplPtr ptr) { super((Pointer)null); allocate(ptr); } + private native void allocate(@Const @ByRef StorageImplPtr ptr); + + public WeakStorage(@ByRef(true) WeakStorage rhs) { super((Pointer)null); allocate(rhs); } + @NoException(true) private native void allocate(@ByRef(true) WeakStorage rhs); + + public native @ByRef @Name("operator =") @NoException(true) WeakStorage put(@ByRef(true) WeakStorage rhs); + + public native @ByRef @Name("operator =") @NoException(true) WeakStorage put( + @Const @ByRef StorageImplPtr rhs); + + public native @NoException(true) void reset(); + + public native @NoException(true) void swap(@ByRef WeakStorage rhs); + + // NB: This should ONLY be used by the std::hash implementation + // for weak_intrusive_ptr. Another way you could do this is + // friend std::hash, but this triggers two + // bugs: + // + // (1) It triggers an nvcc bug, where std::hash in a friend class + // declaration gets preprocessed into hash, which then cannot + // actually be found. The error in this case looks like: + // + // error: no template named 'hash'; did you mean 'std::hash'? + // + // (2) On OS X, std::hash is declared as a struct, not a class. + // This twings: + // + // error: class 'hash' was previously declared as a struct + // [-Werror,-Wmismatched-tags] + // + // Both of these are work-aroundable, but on the whole, I decided + // it would be simpler and easier to make work if we just expose + // an unsafe getter for target_ + // + public native @NoException(true) StorageImpl _unsafe_get_target(); + + public native @Cast("size_t") @NoException(true) long use_count(); + + public native @Cast("size_t") @NoException(true) long weak_use_count(); + + public native @Cast("bool") @NoException(true) boolean expired(); + + public native @ByVal @NoException(true) StorageImplPtr lock(); + + /** + * Returns an owning (but still only weakly referenced) pointer to the + * underlying object and makes the weak_intrusive_ptr instance invalid. + * That means the weakcount is not decreased. + * You *must* put the returned pointer back into a weak_intrusive_ptr using + * weak_intrusive_ptr::reclaim(ptr) to properly destruct it. + * This is helpful for C APIs. + */ + public native @NoException(true) StorageImpl release(); + + /** + * Takes an owning (but must be weakly referenced) pointer to TTarget* and + * creates a weak_intrusive_ptr that takes over ownership. + * This means that the weakcount is not increased. + * This is the counter-part to weak_intrusive_ptr::release() and the pointer + * passed in *must* have been created using weak_intrusive_ptr::release(). + */ + public static native @ByVal WeakStorage reclaim(StorageImpl owning_weak_ptr); + + /** + * Takes a pointer to TTarget* (may be weak or strong) and creates a + * new weak_intrusive_ptr representing a new weak reference, i.e. + * the raw pointer retains ownership. 
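A sketch of the weak-reference protocol documented above, using only methods declared in this class; the owning StorageImplPtr passed in is a hypothetical input obtained elsewhere:

static void observe(StorageImplPtr strong) {
    WeakStorage weak = new WeakStorage(strong); // bumps the weakcount only, not the refcount
    if (!weak.expired()) {
        StorageImplPtr locked = weak.lock();    // owning again; empty if the target died in between
        long owners = weak.use_count();
    }
}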
+ */ + public static native @ByVal WeakStorage reclaim_copy(StorageImpl owning_ptr); + + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java new file mode 100644 index 00000000000..ffc3734822a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVector.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::vector<c10::weak_intrusive_ptr<c10::StorageImpl> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class WeakStorageVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public WeakStorageVector(Pointer p) { super(p); } + public WeakStorageVector() { allocate(); } + private native void allocate(); + + + public boolean empty() { return size() == 0; } + public native long size(); + + public WeakStorage front() { return get(0); } + public WeakStorage back() { return get(size() - 1); } + @Index(function = "at") public native @ByRef WeakStorage get(@Cast("size_t") long i); + + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const WeakStorage get(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java new file mode 100644 index 00000000000..deb05153b50 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakStorageVectorOptional.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("c10::optional<std::vector<c10::weak_intrusive_ptr<c10::StorageImpl> > >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class WeakStorageVectorOptional extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
*/ + public WeakStorageVectorOptional(Pointer p) { super(p); } + public WeakStorageVectorOptional(WeakStorageVector value) { this(); put(value); } + public WeakStorageVectorOptional() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef WeakStorageVectorOptional put(@ByRef WeakStorageVectorOptional x); + + public native boolean has_value(); + public native void reset(); + public native @Name("value") @ByRef WeakStorageVector get(); + @ValueSetter public native WeakStorageVectorOptional put(@ByRef WeakStorageVector value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakTypePtr.java index 6c5faf20454..7b1fc91c9a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WeakTypePtr.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WeakTypePtr.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/While.java b/pytorch/src/gen/java/org/bytedeco/pytorch/While.java index 2edf59d3e2f..1f4e94e8197 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/While.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/While.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -19,8 +21,15 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class While extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public While(Pointer p) { super(p); } - public While(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public While(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr cond(); + public native @ByVal StmtList body(); + public static native @ByVal While create( + @Const @ByRef SourceRange range, + @Const @ByRef Expr cond, + @Const @ByRef StmtList body); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/With.java b/pytorch/src/gen/java/org/bytedeco/pytorch/With.java index 336ac74ee96..94519af98bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/With.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/With.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -21,7 +23,18 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class With extends Stmt { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public With(Pointer p) { super(p); } - public With(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public With(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + + public native @ByVal WithItemList targets(); + + public native @ByVal StmtList body(); + + public static native @ByVal With create( + @Const @ByRef SourceRange range, + @Const @ByRef WithItemList targets, + @Const @ByRef StmtList body); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WithCurrentScope.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WithCurrentScope.java deleted file mode 100644 index 0b80dc23bf9..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WithCurrentScope.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** \brief An utility class for setting temporary scopes. - * - * When an object of this class is created, it stores the current scope, sets - * the new one, and restores the original scope when the object is destroyed. - */ -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class WithCurrentScope extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
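The new Pointer-cast constructors make downcasting JIT tree nodes straightforward. A sketch, with the incoming Stmt assumed (hypothetically) to hold a while-loop:

static void inspect(Stmt stmt) {
    While loop = new While(stmt); // Pointer-cast constructor added in this patch
    Expr cond = loop.cond();
    StmtList body = loop.body();  // body() is newly exposed above
}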
*/ - public WithCurrentScope(Pointer p) { super(p); } - - public WithCurrentScope(@ByRef Graph g, @ByVal @Cast("torch::jit::ScopePtr*") Pointer scope) { super((Pointer)null); allocate(g, scope); } - private native void allocate(@ByRef Graph g, @ByVal @Cast("torch::jit::ScopePtr*") Pointer scope); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WithInsertPoint.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WithInsertPoint.java deleted file mode 100644 index 8628f2a8fd5..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WithInsertPoint.java +++ /dev/null @@ -1,35 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** \brief An utility class for setting temporary insertion points. - * - * When an object of this class is created, it stores the current insertion - * point, sets the new one, and restores the original insertion point when the - * object is destroyed. - */ -@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class WithInsertPoint extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public WithInsertPoint(Pointer p) { super(p); } - - public WithInsertPoint(JitNode n) { super((Pointer)null); allocate(n); } - private native void allocate(JitNode n); - public WithInsertPoint(Block b) { super((Pointer)null); allocate(b); } - private native void allocate(Block b); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WithItem.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItem.java index c3c38d68ea4..7453a970226 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WithItem.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItem.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,9 +22,11 @@ @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class WithItem extends Expr { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public WithItem(Pointer p) { super(p); } - public WithItem(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); + public WithItem(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); public native @ByVal Expr target(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemList.java new file mode 100644 index 00000000000..827ea06a3c9 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemList.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::List<torch::jit::WithItem>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class WithItemList extends TreeView { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public WithItemList(Pointer p) { super(p); } + + + public WithItemList(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + public native @ByVal @Cast("torch::jit::List<torch::jit::WithItem>::iterator*") WithItemListIterator begin(); + public native @ByVal @Cast("torch::jit::List<torch::jit::WithItem>::iterator*") WithItemListIterator end(); + public native @Cast("bool") boolean empty(); + public native @ByVal @Name("operator []") WithItem get(@Cast("size_t") long i); + + public static native @ByVal WithItemList create(@Const @ByRef SourceRange range, @StdVector WithItem subtrees); + public static native @ByVal WithItemList unsafeCreate(@Const @ByRef SourceRange range, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector subtrees); + public native @Cast("size_t") long size(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemListIterator.java new file mode 100644 index 00000000000..4bfc4e990da --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WithItemListIterator.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Name("torch::jit::ListIterator<torch::jit::WithItem>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class WithItemListIterator extends Pointer { + static
{ Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public WithItemListIterator(Pointer p) { super(p); } + + public WithItemListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it) { super((Pointer)null); allocate(it); } + private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it); + public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef WithItemListIterator rhs); + public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef WithItemListIterator rhs); + public native @ByVal @Name("operator *") WithItem multiply(); + public native @ByRef @Name("operator +=") WithItemListIterator addPut(@Cast("std::ptrdiff_t") long n); + public native @ByRef @Name("operator ++") WithItemListIterator increment(); + public native @ByRef @Name("operator --") WithItemListIterator decrement(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WithNestedTracingFrame.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WithNestedTracingFrame.java deleted file mode 100644 index 4c79cbc8675..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WithNestedTracingFrame.java +++ /dev/null @@ -1,36 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit::tracer") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class WithNestedTracingFrame extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public WithNestedTracingFrame(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
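Iterating the new WithItemList follows the begin()/end() pattern of the other torch::jit::List wrappers; the With node `w` is a hypothetical input:

static void listTargets(With w) {
    WithItemList targets = w.targets();
    for (WithItemListIterator it = targets.begin(); it.notEquals(targets.end()); it.increment()) {
        WithItem item = it.multiply(); // operator* dereference
        Expr target = item.target();
    }
}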
*/ - public WithNestedTracingFrame(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public WithNestedTracingFrame position(long position) { - return (WithNestedTracingFrame)super.position(position); - } - @Override public WithNestedTracingFrame getPointer(long i) { - return new WithNestedTracingFrame((Pointer)this).offsetAddress(i); - } - - public WithNestedTracingFrame() { super((Pointer)null); allocate(); } - private native void allocate(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WorkerException.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WorkerException.java deleted file mode 100644 index e6a4d62bbbc..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WorkerException.java +++ /dev/null @@ -1,37 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** An exception thrown when a DataLoader's worker thread throws an exception, - * which is caught. A {@code WorkerException} stores an {@code exception_ptr} to the - * original exception thrown in the worker thread. */ -@Namespace("torch::data") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class WorkerException extends Pointer { - static { Loader.load(); } - - /** Constructs a {@code WorkerException} from an {@code exception_ptr}. */ - public WorkerException(@ByVal @Cast("std::exception_ptr*") Pointer original) { super((Pointer)null); allocate(original); } - private native void allocate(@ByVal @Cast("std::exception_ptr*") Pointer original); - - public native @NoException(true) @Cast("const char*") BytePointer what(); - - /** The original exception thrown in the worker thread. */ - public native @ByRef @Cast("std::exception_ptr*") Pointer original_exception(); public native WorkerException original_exception(Pointer setter); - - /** This exception's message (not the original exception's message). 
*/ - public native @StdString BytePointer message(); public native WorkerException message(BytePointer setter); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java index ef173862c46..ca9e839f08e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2d.java deleted file mode 100644 index 28736443d2f..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2d.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -/** A {@code ModuleHolder} subclass for {@code ZeroPad2dImpl}. - * See the documentation for {@code ZeroPad2dImpl} class to learn what methods it - * provides, and examples of how to use {@code ZeroPad2d} with - * {@code torch::nn::ZeroPad2dOptions}. See the documentation for {@code ModuleHolder} to - * learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ZeroPad2d extends ZeroPad2dImplModuleHolder { - static { Loader.load(); } - - public ZeroPad2d(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } - private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); public ZeroPad2d(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl module) { super((Pointer)null); allocate(module); } - private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl module); - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public ZeroPad2d(Pointer p) { super(p); } - - } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java index 5dfdba7c4c9..eecb2aecaa8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImpl.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -36,9 +38,9 @@ public class ZeroPad2dImpl extends ZeroPad2dImplCloneable { public ZeroPad2dImpl(Pointer p) { super(p); } public ZeroPad2dImpl(@ByVal @Cast("torch::ExpandingArray<4>*") LongPointer padding) { super((Pointer)null); allocate(padding); } - @NoDeallocator private native void allocate(@ByVal @Cast("torch::ExpandingArray<4>*") LongPointer padding); + @SharedPtr private native void allocate(@ByVal @Cast("torch::ExpandingArray<4>*") LongPointer padding); public ZeroPad2dImpl(@Const @ByRef ZeroPad2dOptions options_) { super((Pointer)null); allocate(options_); } - @NoDeallocator private native void allocate(@Const @ByRef ZeroPad2dOptions options_); + @SharedPtr private native void allocate(@Const @ByRef ZeroPad2dOptions options_); public native void reset(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java index 3cfb07777d3..ac0ea437f5a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,18 +22,18 @@ public class ZeroPad2dImplCloneable extends Module { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ZeroPad2dImplCloneable(Pointer p) { super(p); } + @Override public Module asModule() { return asModule(this); } + @Namespace public static native @SharedPtr @Name("SHARED_PTR_NAMESPACE::static_pointer_cast>") Module asModule(@SharedPtr ZeroPad2dImplCloneable pointer); /** {@code reset()} must perform initialization of all members with reference * semantics, most importantly parameters, buffers and submodules. */ public native void reset(); - @Override public Module asModule() { return asModule(this); } - @Namespace public static native @Name("static_cast") Module asModule(ZeroPad2dImplCloneable module); /** Performs a recursive "deep copy" of the {@code Module}, such that all parameters * and submodules in the cloned module are different from those in the * original module. 
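A minimal construction sketch for the refactored ZeroPad2dImpl binding above, whose native allocation is now @SharedPtr-backed. The forward(Tensor) peer and the ones(long...) factory are assumptions taken from elsewhere in the presets (illustrative only, not generated output):

    import org.bytedeco.javacpp.LongPointer;
    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    // ExpandingArray<4> padding passed as a LongPointer, typically {left, right, top, bottom}
    ZeroPad2dImpl pad = new ZeroPad2dImpl(new LongPointer(1, 1, 2, 2));
    Tensor out = pad.forward(ones(1, 1, 4, 4)); // assumed forward(Tensor) peer

Because the module is now held by a shared_ptr, it can be wrapped, cloned, or registered as a submodule without the manual holder classes removed by this patch.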
*/ - public native @SharedPtr Module clone( - @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); - public native @SharedPtr Module clone(); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone( + @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); + public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplModuleHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplModuleHolder.java deleted file mode 100644 index 86241826a53..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplModuleHolder.java +++ /dev/null @@ -1,79 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@Name("torch::nn::ModuleHolder") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ZeroPad2dImplModuleHolder extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ZeroPad2dImplModuleHolder(Pointer p) { super(p); } - - - /// - - /** Default constructs the contained module if if has a default constructor, - * else produces a static error. - * - * NOTE: This uses the behavior of template - * classes in C++ that constructors (or any methods) are only compiled when - * actually used. */ - - - /** Constructs the {@code ModuleHolder} with an empty contained value. Access to - * the underlying module is not permitted and will throw an exception, until - * a value is assigned. */ - /* implicit */ public ZeroPad2dImplModuleHolder(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); } -private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0); - - /** Constructs the {@code ModuleHolder} with a contained module, forwarding all - * arguments to its constructor. */ - - /** Constructs the {@code ModuleHolder} from a pointer to the contained type. - * Example: {@code Linear(std::make_shared(...))}. */ - /* implicit */ public ZeroPad2dImplModuleHolder(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl module) { super((Pointer)null); allocate(module); } -private native void allocate(@SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl module); - - /** Returns true if the {@code ModuleHolder} contains a module, or false if it is - * {@code nullptr}. */ - public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean(); - - /** Forwards to the contained module. */ - public native @Name("operator ->") ZeroPad2dImpl access(); - - /** Forwards to the contained module. */ - - /** Returns a reference to the contained module. */ - public native @ByRef @Name("operator *") ZeroPad2dImpl multiply(); - - /** Returns a const reference to the contained module. */ - - /** Returns a shared pointer to the underlying module. */ - public native @SharedPtr @Cast({"", "std::shared_ptr"}) ZeroPad2dImpl ptr(); - - /** Returns a pointer to the underlying module. 
*/ - public native ZeroPad2dImpl get(); - - /** Returns a const pointer to the underlying module. */ - - /** Calls the {@code forward()} method of the contained module. */ - - /** Forwards to the subscript operator of the contained module. - * NOTE: std::forward is qualified to prevent VS2017 emitting - * error C2872: 'std': ambiguous symbol */ - - /** Returns true if the {@code ModuleHolder} does not contain a module. */ - public native @Cast("bool") @NoException(true) boolean is_empty(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java index 6e54f348eb1..b685de576d7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dOptions.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/_CopyBytesFunctionRegisterer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/_CopyBytesFunctionRegisterer.java deleted file mode 100644 index 466f1a793c1..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/_CopyBytesFunctionRegisterer.java +++ /dev/null @@ -1,61 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class _CopyBytesFunctionRegisterer extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public _CopyBytesFunctionRegisterer(Pointer p) { super(p); } - - public _CopyBytesFunctionRegisterer( - DeviceType from, - DeviceType to, - CopyBytesFunction func_sync, - CopyBytesFunction func_async/*=nullptr*/) { super((Pointer)null); allocate(from, to, func_sync, func_async); } - private native void allocate( - DeviceType from, - DeviceType to, - CopyBytesFunction func_sync, - CopyBytesFunction func_async/*=nullptr*/); - public _CopyBytesFunctionRegisterer( - DeviceType from, - DeviceType to, - CopyBytesFunction func_sync) { super((Pointer)null); allocate(from, to, func_sync); } - private native void allocate( - DeviceType from, - DeviceType to, - CopyBytesFunction func_sync); - public _CopyBytesFunctionRegisterer( - @Cast("c10::DeviceType") byte from, - @Cast("c10::DeviceType") byte to, - CopyBytesFunction func_sync, - CopyBytesFunction func_async/*=nullptr*/) { super((Pointer)null); allocate(from, to, func_sync, func_async); } - private native void allocate( - @Cast("c10::DeviceType") byte from, - @Cast("c10::DeviceType") byte to, - CopyBytesFunction func_sync, - CopyBytesFunction func_async/*=nullptr*/); - public _CopyBytesFunctionRegisterer( - @Cast("c10::DeviceType") byte from, - @Cast("c10::DeviceType") byte to, - CopyBytesFunction func_sync) { super((Pointer)null); allocate(from, to, func_sync); } - private native void allocate( - @Cast("c10::DeviceType") byte from, - @Cast("c10::DeviceType") byte to, - CopyBytesFunction func_sync); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/_compute_enum_name.java b/pytorch/src/gen/java/org/bytedeco/pytorch/_compute_enum_name.java deleted file mode 100644 index dbc58022731..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/_compute_enum_name.java +++ /dev/null @@ -1,77 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::enumtype") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class _compute_enum_name extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public _compute_enum_name() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public _compute_enum_name(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public _compute_enum_name(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public _compute_enum_name position(long position) { - return (_compute_enum_name)super.position(position); - } - @Override public _compute_enum_name getPointer(long i) { - return new _compute_enum_name((Pointer)this).offsetAddress(i); - } - - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kLinear v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kConv1D v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kConv2D v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kConv3D v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kConvTranspose1D v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kConvTranspose2D v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kConvTranspose3D v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kSigmoid v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kTanh v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kReLU v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kGELU v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kSiLU v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kMish v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kLeakyReLU v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kFanIn v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kFanOut v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kConstant v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kReflect v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kReplicate v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kCircular v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kNearest v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kBilinear v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kBicubic v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kTrilinear v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kArea v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kNearestExact v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kSum v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kMean v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kMax v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kNone v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kBatchMean v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kZeros v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kBorder v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kReflection v); 
- public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kRNN_TANH v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kRNN_RELU v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kLSTM v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kGRU v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kValid v); - public native @StdString @Name("operator ()") BytePointer apply(@Const @ByRef kSame v); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/_str_wrapper.java b/pytorch/src/gen/java/org/bytedeco/pytorch/_str_wrapper.java deleted file mode 100644 index b847af15cc8..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/_str_wrapper.java +++ /dev/null @@ -1,41 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// Specializations for already-a-string types. -@Name("c10::detail::_str_wrapper") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class _str_wrapper extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public _str_wrapper() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public _str_wrapper(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public _str_wrapper(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public _str_wrapper position(long position) { - return (_str_wrapper)super.position(position); - } - @Override public _str_wrapper getPointer(long i) { - return new _str_wrapper((Pointer)this).offsetAddress(i); - } - - // return by reference to avoid the binary size of a string copy - public static native @StdString BytePointer call(@StdString BytePointer str); - public static native @StdString String call(@StdString String str); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/all_of.java b/pytorch/src/gen/java/org/bytedeco/pytorch/all_of.java deleted file mode 100644 index 0730a5953bc..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/all_of.java +++ /dev/null @@ -1,25 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - // namespace detail - -@Namespace("torch") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class all_of extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. 
*/ - public all_of() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public all_of(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java index 0cf14f50208..cf4c7ad5820 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_iterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,4 +47,9 @@ private native void allocate( public native @ByVal @Name("operator ->") IValue access(); public native @ByRef @Name("operator ++") attribute_iterator increment(); public native @ByVal @Name("operator ++") attribute_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef attribute_iterator a, + @Const @ByRef attribute_iterator b); + public boolean notEquals(attribute_iterator b) { return notEquals(this, b); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java index a3b4877a427..53ddd45911f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/attribute_list.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/bitset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/bitset.java new file mode 100644 index 00000000000..0d64fdf3b83 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/bitset.java @@ -0,0 +1,70 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * This is a simple bitset class with sizeof(long long int) bits. + * You can set bits, unset bits, query bits by index, + * and query for the first set bit. + * Before using this class, please also take a look at std::bitset, + * which has more functionality and is more generic. It is probably + * a better fit for your use case. 
The sole reason for c10::utils::bitset
+ * to exist is that std::bitset misses a find_first_set() method.
+ */
+@Namespace("c10::utils") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class bitset extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public bitset(Pointer p) { super(p); }
+    /** Native array allocator. Access with {@link Pointer#position(long)}. */
+    public bitset(long size) { super((Pointer)null); allocateArray(size); }
+    private native void allocateArray(long size);
+    @Override public bitset position(long position) {
+        return (bitset)super.position(position);
+    }
+    @Override public bitset getPointer(long i) {
+        return new bitset((Pointer)this).offsetAddress(i);
+    }
+
+    public static native @Cast("const size_t") long NUM_BITS();
+
+    public bitset() { super((Pointer)null); allocate(); }
+    @NoException(true) private native void allocate();
+    public bitset(@Const @ByRef bitset arg0) { super((Pointer)null); allocate(arg0); }
+    @NoException(true) private native void allocate(@Const @ByRef bitset arg0);
+    // there is an issue for gcc 5.3.0 when defining a default function as constexpr
+    // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68754.
+    public native @ByRef @Name("operator =") @NoException(true) bitset put(@Const @ByRef bitset arg0);
+
+    public native @NoException(true) void set(@Cast("size_t") long index);
+
+    public native @NoException(true) void unset(@Cast("size_t") long index);
+
+    public native @Cast("const bool") @NoException(true) boolean get(@Cast("size_t") long index);
+
+    public native @Cast("const bool") @NoException(true) boolean is_entirely_unset();
+
+    // Call the given functor with the index of each bit that is set
+    // Return the index of the first set bit. The returned index is one-indexed
+    // (i.e. if the very first bit is set, this function returns '1'), and a
+    // return of '0' means that there was no bit set.
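A small usage sketch covering the bitset members declared above; every name comes from this hunk (illustrative only):

    bitset bits = new bitset();                   // all bits start unset
    bits.set(3);
    boolean isSet = bits.get(3);                  // true
    bits.unset(3);
    boolean clear = bits.is_entirely_unset();     // true again
    boolean same = bits.equals(new bitset());     // mapping of native operator==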
+ + private static native @Namespace @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@ByVal bitset lhs, @ByVal bitset rhs); + public boolean equals(bitset rhs) { return equals(this, rhs); } +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java index 5780ca53124..a0b5610ea52 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_iterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,4 +47,9 @@ private native void allocate( public native @ByVal @Name("operator ->") Tensor access(); public native @ByRef @Name("operator ++") buffer_iterator increment(); public native @ByVal @Name("operator ++") buffer_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef buffer_iterator a, + @Const @ByRef buffer_iterator b); + public boolean notEquals(buffer_iterator b) { return notEquals(this, b); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java index 1d653ea47bb..6e3e6674133 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/buffer_list.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonPrintImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/class_.java similarity index 61% rename from pytorch/src/gen/java/org/bytedeco/pytorch/PythonPrintImpl.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/class_.java index 5c5187fa489..c70b60d5c01 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PythonPrintImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/class_.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,10 +17,10 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("torch::jit") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class PythonPrintImpl extends Pointer { +@Namespace("pybind11") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class class_ extends 
Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public PythonPrintImpl() { super((Pointer)null); } + public class_() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public PythonPrintImpl(Pointer p) { super(p); } + public class_(Pointer p) { super(p); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ObserverContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/crc64_t.java similarity index 50% rename from pytorch/src/gen/java/org/bytedeco/pytorch/ObserverContext.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/crc64_t.java index bd35874be4b..638ac5ca856 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ObserverContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/crc64_t.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -14,14 +16,15 @@ import static org.bytedeco.openblas.global.openblas.*; import static org.bytedeco.pytorch.global.torch.*; + // namespace detail - -// An abstract base class for various observer contexts that can be attached to -// the RecordFunction. -@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ObserverContext extends Pointer { +@Namespace("c10::util") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class crc64_t extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ObserverContext(Pointer p) { super(p); } + public crc64_t(Pointer p) { super(p); } + public crc64_t(@Cast("uint64_t") long checksum) { super((Pointer)null); allocate(checksum); } + private native void allocate(@Cast("uint64_t") long checksum); + public native @Cast("const uint64_t") long checksum(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java new file mode 100644 index 00000000000..c9e732a0271 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ActivationDescriptor.java @@ -0,0 +1,43 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("at::native") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class ActivationDescriptor extends Pointer { + static { Loader.load(); } + /** Default native constructor. 
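The renamed crc64_t wrapper above only carries a checksum value. A brief sketch; the hexadecimal constant is an arbitrary placeholder, and any crc64 computation helpers are assumed to live elsewhere in the presets:

    crc64_t crc = new crc64_t(0x9AFB1EDCL); // wrap an existing 64-bit checksum value
    long value = crc.checksum();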
*/ + public ActivationDescriptor() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ActivationDescriptor(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public ActivationDescriptor(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public ActivationDescriptor position(long position) { + return (ActivationDescriptor)super.position(position); + } + @Override public ActivationDescriptor getPointer(long i) { + return new ActivationDescriptor((Pointer)this).offsetAddress(i); + } + + public native void set(@Cast("cudnnActivationMode_t") int mode); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java new file mode 100644 index 00000000000..9f0548222c3 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("at::native") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CTCLossDescriptor extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public CTCLossDescriptor() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public CTCLossDescriptor(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public CTCLossDescriptor(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public CTCLossDescriptor position(long position) { + return (CTCLossDescriptor)super.position(position); + } + @Override public CTCLossDescriptor getPointer(long i) { + return new CTCLossDescriptor((Pointer)this).offsetAddress(i); + } + + public native void set(@Cast("cudnnDataType_t") int datatype); +// #if CUDNN_VERSION >= 7600 + public native void setEx( + @Cast("cudnnDataType_t") int datatype, + @Cast("cudnnLossNormalizationMode_t") int normMode, + @Cast("cudnnNanPropagation_t") int gradMode); +// #endif +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java new file mode 100644 index 00000000000..0366d8b4f34 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAGuard.java @@ -0,0 +1,76 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// This code is kind of boilerplatey. See Note [Whither the DeviceGuard +// boilerplate] + +/** A variant of DeviceGuard that is specialized for CUDA. It accepts + * integer indices (interpreting them as CUDA devices) and is a little + * more efficient than DeviceGuard (it compiles to straight line + * cudaSetDevice/cudaGetDevice calls); however, it can only be used + * from code that links against CUDA directly. */ +@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAGuard extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public CUDAGuard(Pointer p) { super(p); } + + /** No default constructor; see Note [Omitted default constructor from RAII] */ + + + /** Set the current CUDA device to the passed device index. */ + public CUDAGuard(byte device_index) { super((Pointer)null); allocate(device_index); } + private native void allocate(byte device_index); + + /** Sets the current CUDA device to the passed device. Errors if the passed + * device is not a CUDA device. */ + public CUDAGuard(@ByVal Device device) { super((Pointer)null); allocate(device); } + private native void allocate(@ByVal Device device); + + // Copy is not allowed + + + + // Move is not allowed (there is no uninitialized state) + + + + /** Sets the CUDA device to the given device. Errors if the given device + * is not a CUDA device. */ + public native void set_device(@ByVal Device device); + + /** Sets the CUDA device to the given device. Errors if the given device + * is not a CUDA device. (This method is provided for uniformity with + * DeviceGuard). 
*/ + public native void reset_device(@ByVal Device device); + + /** Sets the CUDA device to the given device index. */ + public native void set_index(byte device_index); + + /** Returns the device that was set upon construction of the guard */ + public native @ByVal Device original_device(); + + /** Returns the last device that was set via {@code set_device}, if any, otherwise + * the device passed during construction. */ + public native @ByVal Device current_device(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java new file mode 100644 index 00000000000..04011970743 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfo.java @@ -0,0 +1,62 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +/** Use to hold info about kernel launches so that we can run kernels + * asynchronously and still associate launches with device-side + * assertion failures */ +@Namespace("c10::cuda") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAKernelLaunchInfo extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public CUDAKernelLaunchInfo() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public CUDAKernelLaunchInfo(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
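A usage sketch for the CUDAGuard RAII binding above. In Java, the previous device is restored when the native guard object is deallocated, so try-with-resources (JavaCPP's Pointer implements AutoCloseable) is the natural scope; a CUDA-enabled build is assumed:

    try (CUDAGuard guard = new CUDAGuard((byte) 1)) { // switch to CUDA device 1
        Device active = guard.current_device();
        // ... launch work on device 1 ...
    } // native destructor runs here and restores the original device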
*/ + public CUDAKernelLaunchInfo(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public CUDAKernelLaunchInfo position(long position) { + return (CUDAKernelLaunchInfo)super.position(position); + } + @Override public CUDAKernelLaunchInfo getPointer(long i) { + return new CUDAKernelLaunchInfo((Pointer)this).offsetAddress(i); + } + + /** Filename of the code where the kernel was launched from */ + public native @Cast("const char*") BytePointer launch_filename(); public native CUDAKernelLaunchInfo launch_filename(BytePointer setter); + /** Function from which the kernel was launched */ + public native @Cast("const char*") BytePointer launch_function(); public native CUDAKernelLaunchInfo launch_function(BytePointer setter); + /** Line number of where the code was launched from */ + public native @Cast("uint32_t") int launch_linenum(); public native CUDAKernelLaunchInfo launch_linenum(int setter); + /** Backtrace of where the kernel was launched from, only populated if + * CUDAKernelLaunchRegistry::gather_launch_stacktrace is True */ + public native @StdString BytePointer launch_stacktrace(); public native CUDAKernelLaunchInfo launch_stacktrace(BytePointer setter); + /** Kernel that was launched */ + public native @Cast("const char*") BytePointer kernel_name(); public native CUDAKernelLaunchInfo kernel_name(BytePointer setter); + /** Device the kernel was launched on */ + public native int device(); public native CUDAKernelLaunchInfo device(int setter); + /** Stream the kernel was launched on */ + public native int stream(); public native CUDAKernelLaunchInfo stream(int setter); + /** A number that uniquely identifies the kernel launch */ + public native @Cast("uint64_t") long generation_number(); public native CUDAKernelLaunchInfo generation_number(long setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java new file mode 100644 index 00000000000..51971d0db4e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchInfoVector.java @@ -0,0 +1,93 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + +@Name("std::vector") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAKernelLaunchInfoVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public CUDAKernelLaunchInfoVector(Pointer p) { super(p); } + public CUDAKernelLaunchInfoVector(CUDAKernelLaunchInfo value) { this(1); put(0, value); } + public CUDAKernelLaunchInfoVector(CUDAKernelLaunchInfo ... 
array) { this(array.length); put(array); } + public CUDAKernelLaunchInfoVector() { allocate(); } + public CUDAKernelLaunchInfoVector(long n) { allocate(n); } + private native void allocate(); + private native void allocate(@Cast("size_t") long n); + public native @Name("operator =") @ByRef CUDAKernelLaunchInfoVector put(@ByRef CUDAKernelLaunchInfoVector x); + + public boolean empty() { return size() == 0; } + public native long size(); + public void clear() { resize(0); } + public native void resize(@Cast("size_t") long n); + + public CUDAKernelLaunchInfo front() { return get(0); } + public CUDAKernelLaunchInfo back() { return get(size() - 1); } + @Index(function = "at") public native @ByRef CUDAKernelLaunchInfo get(@Cast("size_t") long i); + public native CUDAKernelLaunchInfoVector put(@Cast("size_t") long i, CUDAKernelLaunchInfo value); + + public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef CUDAKernelLaunchInfo value); + public native @ByVal Iterator erase(@ByVal Iterator pos); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const CUDAKernelLaunchInfo get(); + } + + public CUDAKernelLaunchInfo[] get() { + CUDAKernelLaunchInfo[] array = new CUDAKernelLaunchInfo[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE]; + for (int i = 0; i < array.length; i++) { + array[i] = get(i); + } + return array; + } + @Override public String toString() { + return java.util.Arrays.toString(get()); + } + + public CUDAKernelLaunchInfo pop_back() { + long size = size(); + CUDAKernelLaunchInfo value = get(size - 1); + resize(size - 1); + return value; + } + public CUDAKernelLaunchInfoVector push_back(CUDAKernelLaunchInfo value) { + long size = size(); + resize(size + 1); + return put(size, value); + } + public CUDAKernelLaunchInfoVector put(CUDAKernelLaunchInfo value) { + if (size() != 1) { resize(1); } + return put(0, value); + } + public CUDAKernelLaunchInfoVector put(CUDAKernelLaunchInfo ... 
array) {
+        if (size() != array.length) { resize(array.length); }
+        for (int i = 0; i < array.length; i++) {
+            put(i, array[i]);
+        }
+        return this;
+    }
+}
+
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java
new file mode 100644
index 00000000000..cdf41555bba
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAKernelLaunchRegistry.java
@@ -0,0 +1,81 @@
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch.cuda;
+
+import org.bytedeco.pytorch.*;
+import org.bytedeco.pytorch.Error;
+import org.bytedeco.pytorch.global.torch.DeviceType;
+import org.bytedeco.pytorch.global.torch.ScalarType;
+import org.bytedeco.pytorch.global.torch.MemoryFormat;
+import org.bytedeco.pytorch.Allocator;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.pytorch.*;
+import static org.bytedeco.pytorch.global.torch.*;
+
+import static org.bytedeco.pytorch.global.torch_cuda.*;
+
+
+/** Circular buffer used to hold information about kernel launches;
+ * this is later used to reconstruct how a device-side kernel assertion failure
+ * occurred. CUDAKernelLaunchRegistry is used as a singleton. */
+@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class)
+public class CUDAKernelLaunchRegistry extends Pointer {
+    static { Loader.load(); }
+    /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+    public CUDAKernelLaunchRegistry(Pointer p) { super(p); }
+    /** Native array allocator. Access with {@link Pointer#position(long)}. */
+    public CUDAKernelLaunchRegistry(long size) { super((Pointer)null); allocateArray(size); }
+    private native void allocateArray(long size);
+    @Override public CUDAKernelLaunchRegistry position(long position) {
+        return (CUDAKernelLaunchRegistry)super.position(position);
+    }
+    @Override public CUDAKernelLaunchRegistry getPointer(long i) {
+        return new CUDAKernelLaunchRegistry((Pointer)this).offsetAddress(i);
+    }
+
+    public CUDAKernelLaunchRegistry() { super((Pointer)null); allocate(); }
+    private native void allocate();
+    /** Register a new kernel launch and obtain a generation number back to be
+     * passed to the kernel */
+    public native @Cast("uint32_t") int insert(
+        @Cast("const char*") BytePointer launch_filename,
+        @Cast("const char*") BytePointer launch_function,
+        @Cast("const uint32_t") int launch_linenum,
+        @Cast("const char*") BytePointer kernel_name,
+        int stream_id);
+    public native @Cast("uint32_t") int insert(
+        String launch_filename,
+        String launch_function,
+        @Cast("const uint32_t") int launch_linenum,
+        String kernel_name,
+        int stream_id);
+    /** Get copies of the kernel launch registry and each device's assertion
+     * failure buffer so they can be inspected without raising race conditions */
+    public native @ByVal DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair snapshot();
+    /** Get a pointer to the current device's assertion failure buffer. If no such
+     * buffer exists then one is created.
This means that the first kernel launch + * made on each device will be slightly slower because memory allocations are + * required */ + public native DeviceAssertionsData get_uvm_assertions_ptr_for_current_device(); + /** Gets the global singleton of the registry */ + public static native @ByRef CUDAKernelLaunchRegistry get_singleton_ref(); + /** If not all devices support DSA, we disable it */ + @MemberGetter public native @Cast("const bool") boolean do_all_devices_support_managed_memory(); + /** Whether or not to gather stack traces when launching kernels */ + public native @Cast("bool") boolean gather_launch_stacktrace(); public native CUDAKernelLaunchRegistry gather_launch_stacktrace(boolean setter); + /** Whether or not host-side DSA is enabled or disabled at run-time + * Note: Device-side code cannot be enabled/disabled at run-time */ + public native @Cast("bool") boolean enabled_at_runtime(); public native CUDAKernelLaunchRegistry enabled_at_runtime(boolean setter); + /** Whether or not a device has indicated a failure */ + public native @Cast("bool") boolean has_failed(); +// #ifdef TORCH_USE_CUDA_DSA +// #else + @MemberGetter public native @Cast("const bool") boolean enabled_at_compile_time(); +// #endif +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java new file mode 100644 index 00000000000..73361de9189 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAMultiStreamGuard.java @@ -0,0 +1,43 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +/** A variant of MultiStreamGuard that is specialized for CUDA. */ +@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAMultiStreamGuard extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
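A sketch of inspecting the registry above. The first()/second() accessors on the generated pair class are assumptions based on the usual JavaCPP std::pair mapping; everything else is declared in these hunks:

    CUDAKernelLaunchRegistry registry = CUDAKernelLaunchRegistry.get_singleton_ref();
    DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair snap = registry.snapshot();
    CUDAKernelLaunchInfoVector launches = snap.second(); // assumed pair accessor
    for (long i = 0; i < launches.size(); i++) {
        CUDAKernelLaunchInfo info = launches.get(i);
        System.out.println(info.kernel_name().getString()
                + " @ " + info.launch_filename().getString()
                + ":" + info.launch_linenum());
    }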
*/ + public CUDAMultiStreamGuard(Pointer p) { super(p); } + + public CUDAMultiStreamGuard(@ByVal CUDAStreamArrayRef streams) { super((Pointer)null); allocate(streams); } + private native void allocate(@ByVal CUDAStreamArrayRef streams); + + /** Copy is disallowed */ + + + + // See Note [Move construction for RAII guards is tricky] + + + // See Note [Move assignment for RAII guards is tricky] + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java new file mode 100644 index 00000000000..11c4f5d399e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStream.java @@ -0,0 +1,112 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// Value object representing a CUDA stream. This is just a wrapper +// around c10::Stream, but it comes with a little extra CUDA-specific +// functionality (conversion to cudaStream_t), and a guarantee that +// the wrapped c10::Stream really is a CUDA stream. +@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAStream extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public CUDAStream(Pointer p) { super(p); } + + public enum Unchecked { UNCHECKED(0); + + public final int value; + private Unchecked(int v) { this.value = v; } + private Unchecked(Unchecked e) { this.value = e.value; } + public Unchecked intern() { for (Unchecked e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } + } + + /** Construct a CUDAStream from a Stream. This construction is checked, + * and will raise an error if the Stream is not, in fact, a CUDA stream. */ + public CUDAStream(@ByVal Stream stream) { super((Pointer)null); allocate(stream); } + private native void allocate(@ByVal Stream stream); + + /** Construct a CUDAStream from a Stream with no error checking. 
+ * This constructor uses the "named" constructor idiom, and can + * be invoked as: CUDAStream(CUDAStream::UNCHECKED, stream) */ + public CUDAStream(Unchecked arg0, @ByVal Stream stream) { super((Pointer)null); allocate(arg0, stream); } + private native void allocate(Unchecked arg0, @ByVal Stream stream); + public CUDAStream(@Cast("c10::cuda::CUDAStream::Unchecked") int arg0, @ByVal Stream stream) { super((Pointer)null); allocate(arg0, stream); } + private native void allocate(@Cast("c10::cuda::CUDAStream::Unchecked") int arg0, @ByVal Stream stream); + + public native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByRef CUDAStream other); + + public native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@Const @ByRef CUDAStream other); + + /** Implicit conversion to cudaStream_t. */ + public native @Cast("cudaStream_t") @Name("operator cudaStream_t") Pointer asPointer(); + + /** Implicit conversion to Stream (a.k.a., forget that the stream is a + * CUDA stream). */ + public native @ByVal @Name("operator c10::Stream") Stream asStream(); + + /** Used to avoid baking in device type explicitly to Python-side API. */ + public native @ByVal DeviceType device_type(); + + /** Get the CUDA device index that this stream is associated with. */ + public native byte device_index(); + + /** Get the full Device that this stream is associated with. The Device + * is guaranteed to be a CUDA device. */ + public native @ByVal Device device(); + + /** Return the stream ID corresponding to this particular stream. */ + public native long id(); + + public native @Cast("bool") boolean query(); + + public native void synchronize(); + + public native int priority(); + + /** Explicit conversion to cudaStream_t. */ + public native @Cast("cudaStream_t") Pointer stream(); + + /** Explicit conversion to Stream. */ + + /// + public native @ByVal Stream unwrap(); + + /** Reversibly pack a CUDAStream into a struct representation. + * Previously the stream's data was packed into a single int64_t, + * as it was assumed the fields would not require more than + * 64 bits of storage in total. + * See https://github.com/pytorch/pytorch/issues/75854 + * for more information regarding newer platforms that may violate + * this assumption. + * + * The CUDAStream can be unpacked using unpack(). */ + public native @ByVal StreamData3 pack3(); + + // Unpack a CUDAStream from the 3 fields generated by pack(). 
+ public static native @ByVal CUDAStream unpack3( + long stream_id, + byte device_index, + @ByVal DeviceType device_type); + + public static native @ByVal T_IntInt_T priority_range(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java new file mode 100644 index 00000000000..e93dde9c480 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamArrayRef.java @@ -0,0 +1,147 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + +/** ArrayRef - Represent a constant reference to an array (0 or more elements + * consecutively in memory), i.e. a start pointer and a length. It allows + * various APIs to take consecutive elements easily and conveniently. + * + * This class does not own the underlying data, it is expected to be used in + * situations where the data resides in some other buffer, whose lifetime + * extends past that of the ArrayRef. For this reason, it is not in general + * safe to store an ArrayRef. + * + * This is intended to be trivially copyable, so it should be passed by + * value. */ +@Name("c10::ArrayRef<c10::cuda::CUDAStream>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAStreamArrayRef extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public CUDAStreamArrayRef(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public CUDAStreamArrayRef(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public CUDAStreamArrayRef position(long position) { + return (CUDAStreamArrayRef)super.position(position); + } + @Override public CUDAStreamArrayRef getPointer(long i) { + return new CUDAStreamArrayRef((Pointer)this).offsetAddress(i); + } + + /** \name Constructors + * \{ +

+ * Construct an empty ArrayRef. */ + /* implicit */ public CUDAStreamArrayRef() { super((Pointer)null); allocate(); } +private native void allocate(); + + /** Construct an ArrayRef from a single element. */ + // TODO Make this explicit + + + /** Construct an ArrayRef from a pointer and length. */ + public CUDAStreamArrayRef(@Const CUDAStream data, @Cast("size_t") long length) { super((Pointer)null); allocate(data, length); } + private native void allocate(@Const CUDAStream data, @Cast("size_t") long length); + + /** Construct an ArrayRef from a range. */ + public CUDAStreamArrayRef(@Const CUDAStream begin, @Const CUDAStream end) { super((Pointer)null); allocate(begin, end); } + private native void allocate(@Const CUDAStream begin, @Const CUDAStream end); + + /** Construct an ArrayRef from a SmallVector. This is templated in order to + * avoid instantiating SmallVectorTemplateCommon<T> whenever we + * copy-construct an ArrayRef. */ + + /** Construct an ArrayRef from a std::vector. */ + // The enable_if stuff here makes sure that this isn't used for + // std::vector<bool>, because ArrayRef can't work on a std::vector<bool> + // bitfield. + + /** Construct an ArrayRef from a std::array */ + + /** Construct an ArrayRef from a C array. */ + + /** Construct an ArrayRef from a std::initializer_list. */ + /* implicit */ + + /** \} + * \name Simple Operations + * \{ */ + + public native @Const @ByPtr CUDAStream begin(); + public native @Const @ByPtr CUDAStream end(); + + // These are actually the same as iterator, since ArrayRef only + // gives you const iterators. + public native @Const @ByPtr CUDAStream cbegin(); + public native @Const @ByPtr CUDAStream cend(); + + /** empty - Check if the array is empty. */ + public native @Cast("const bool") boolean empty(); + + public native @Const CUDAStream data(); + + /** size - Get the array size. */ + public native @Cast("const size_t") long size(); + + /** front - Get the first element. */ + public native @Const @ByRef CUDAStream front(); + + /** back - Get the last element. */ + public native @Const @ByRef CUDAStream back(); + + /** equals - Check for element-wise equality. */ + public native @Cast("const bool") boolean equals(@ByVal CUDAStreamArrayRef RHS); + + /** slice(n, m) - Take M elements of the array starting at element N */ + public native @ByVal CUDAStreamArrayRef slice(@Cast("size_t") long N, @Cast("size_t") long M); + + /** slice(n) - Chop off the first N elements of the array. */ + public native @Const @ByVal CUDAStreamArrayRef slice(@Cast("size_t") long N); + + /** \} + * \name Operator Overloads + * \{ */ + public native @Const @ByRef @Name("operator []") CUDAStream get(@Cast("size_t") long Index); + + /** Vector compatibility */ + + /// + public native @Const @ByRef CUDAStream at(@Cast("size_t") long Index); + + /** Disallow accidental assignment from a temporary. + * + * The declaration here is extra complicated so that "arrayRef = {}" + * continues to select the move assignment operator. */ + + + /** Disallow accidental assignment from a temporary. + * + * The declaration here is extra complicated so that "arrayRef = {}" + * continues to select the move assignment operator. 
*/ + + + /** \} + * \name Expensive Operations + * \{ */ + public native @StdVector CUDAStream vec(); + + /** \} */ +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java new file mode 100644 index 00000000000..62880eff1fb --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamCaptureModeGuard.java @@ -0,0 +1,35 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// RAII guard for "cudaStreamCaptureMode", a thread-local value +// that controls the error-checking strictness of a capture. +// #if !defined(USE_ROCM) || ROCM_VERSION >= 50300 +@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAStreamCaptureModeGuard extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public CUDAStreamCaptureModeGuard(Pointer p) { super(p); } + + public CUDAStreamCaptureModeGuard(@Cast("cudaStreamCaptureMode") int desired) { super((Pointer)null); allocate(desired); } + private native void allocate(@Cast("cudaStreamCaptureMode") int desired); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java new file mode 100644 index 00000000000..e0b1d7666f2 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamGuard.java @@ -0,0 +1,79 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +/** A variant of StreamGuard that is specialized for CUDA. See CUDAGuard + * for when you can use this. */ +@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAStreamGuard extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
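A hedged sketch of how this ArrayRef wrapper can feed the CUDAMultiStreamGuard constructor shown earlier in this patch; `stream` is an assumed existing CUDAStream, and the explicit deallocate() stands in for the C++ guard going out of scope:

    // Guard a single stream through a length-1 ArrayRef view.
    CUDAStreamArrayRef streams = new CUDAStreamArrayRef(stream, 1);
    CUDAMultiStreamGuard guard = new CUDAMultiStreamGuard(streams);
    // ... enqueue work on `stream` ...
    guard.deallocate(); // runs the native destructor, restoring prior streams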
*/ + public CUDAStreamGuard(Pointer p) { super(p); } + + /** No default constructor, see Note [Omitted default constructor from RAII] */ + + + /** Set the current CUDA device to the device associated with the passed + * stream, and set the current CUDA stream on that device to the passed + * stream. Errors if the Stream is not a CUDA stream. */ + public CUDAStreamGuard(@ByVal Stream stream) { super((Pointer)null); allocate(stream); } + private native void allocate(@ByVal Stream stream); + + /** Copy is disallowed */ + + + + /** Move is disallowed, as CUDAStreamGuard does not have an uninitialized + * state, which is required for moves on types with nontrivial destructors. */ + + + + /** Resets the currently set stream to the original stream and + * the currently set device to the original device. Then, + * set the current device to the device associated with the passed stream, + * and set the current stream on that device to the passed stream. + * Errors if the stream passed is not a CUDA stream. + * + * NOTE: this implementation may skip some stream/device setting if + * it can prove that it is unnecessary. + * + * WARNING: reset_stream does NOT preserve previously set streams on + * different devices. If you need to set streams on multiple devices + * on CUDA, use CUDAMultiStreamGuard instead. */ + public native void reset_stream(@ByVal Stream stream); + + /** Returns the CUDA stream that was set at the time the guard was + * constructed. */ + public native @ByVal CUDAStream original_stream(); + + /** Returns the most recent CUDA stream that was set using this device guard, + * either from construction, or via set_stream. */ + public native @ByVal CUDAStream current_stream(); + + /** Returns the most recent CUDA device that was set using this device guard, + * either from construction, or via set_device/reset_device/set_index. */ + public native @ByVal Device current_device(); + + /** Returns the CUDA device that was set at the most recent reset_stream(), + * or otherwise the device at construction time. */ + public native @ByVal Device original_device(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamOptional.java new file mode 100644 index 00000000000..97dee56b551 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAStreamOptional.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + +@NoOffset @Name("c10::optional<c10::cuda::CUDAStream>") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CUDAStreamOptional extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public CUDAStreamOptional(Pointer p) { super(p); } + public CUDAStreamOptional(CUDAStream value) { this(); put(value); } + public CUDAStreamOptional() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef CUDAStreamOptional put(@ByRef CUDAStreamOptional x); + + public native boolean has_value(); + public native void reset(); + public native @Name("value") @ByRef CUDAStream get(); + @ValueSetter public native CUDAStreamOptional put(@ByRef CUDAStream value); +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java new file mode 100644 index 00000000000..04ef3e72ac4 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Constant.java @@ -0,0 +1,34 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("at::native") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class Constant extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
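A short sketch of the optional wrapper above, assuming `stream` is an existing CUDAStream:

    CUDAStreamOptional opt = new CUDAStreamOptional(); // starts empty
    opt.put(stream);                                   // now holds a value
    if (opt.has_value()) {
        CUDAStream s = opt.get();                      // c10::optional::value()
    }
    opt.reset();                                       // back to empty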
*/ + public Constant(Pointer p) { super(p); } + + public native float f(); public native Constant f(float setter); + public native double d(); public native Constant d(double setter); + public Constant(@Cast("cudnnDataType_t") int dataType, double value) { super((Pointer)null); allocate(dataType, value); } + private native void allocate(@Cast("cudnnDataType_t") int dataType, double value); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java new file mode 100644 index 00000000000..bc73f09d29e --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ConvolutionDescriptor.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("at::native") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class ConvolutionDescriptor extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public ConvolutionDescriptor() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ConvolutionDescriptor(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
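The Constant helper above is a small float/double union keyed by cudnnDataType_t. A hedged sketch; the value 0 for CUDNN_DATA_FLOAT is an assumption about cuDNN's C enum, which this patch does not define:

    final int CUDNN_DATA_FLOAT = 0;               // assumed cudnnDataType_t value
    Constant alpha = new Constant(CUDNN_DATA_FLOAT, 1.0);
    float a = alpha.f();                          // float member is the active one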
*/ + public ConvolutionDescriptor(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public ConvolutionDescriptor position(long position) { + return (ConvolutionDescriptor)super.position(position); + } + @Override public ConvolutionDescriptor getPointer(long i) { + return new ConvolutionDescriptor((Pointer)this).offsetAddress(i); + } + + public native void set(@Cast("cudnnDataType_t") int dataType, int dim, IntPointer pad, IntPointer stride, IntPointer upscale, int groups, @Cast("bool") boolean allow_tf32); + public native void set(@Cast("cudnnDataType_t") int dataType, int dim, IntBuffer pad, IntBuffer stride, IntBuffer upscale, int groups, @Cast("bool") boolean allow_tf32); + public native void set(@Cast("cudnnDataType_t") int dataType, int dim, int[] pad, int[] stride, int[] upscale, int groups, @Cast("bool") boolean allow_tf32); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java new file mode 100644 index 00000000000..4aefce154c0 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CuDNNError.java @@ -0,0 +1,30 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class CuDNNError extends Error { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public CuDNNError(Pointer p) { super(p); } + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java new file mode 100644 index 00000000000..e20fdc130b7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionData.java @@ -0,0 +1,63 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +/** Holds information about any device-side assertions that fail. + * Held in managed memory and accessed by both the CPU and the GPU. 
*/ +@Namespace("c10::cuda") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class DeviceAssertionData extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public DeviceAssertionData() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public DeviceAssertionData(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DeviceAssertionData(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public DeviceAssertionData position(long position) { + return (DeviceAssertionData)super.position(position); + } + @Override public DeviceAssertionData getPointer(long i) { + return new DeviceAssertionData((Pointer)this).offsetAddress(i); + } + + /** Stringification of the assertion */ + public native @Cast("char") byte assertion_msg(int i); public native DeviceAssertionData assertion_msg(int i, byte setter); + @MemberGetter public native @Cast("char*") BytePointer assertion_msg(); + /** File the assertion was in */ + public native @Cast("char") byte filename(int i); public native DeviceAssertionData filename(int i, byte setter); + @MemberGetter public native @Cast("char*") BytePointer filename(); + /** Name of the function the assertion was in */ + public native @Cast("char") byte function_name(int i); public native DeviceAssertionData function_name(int i, byte setter); + @MemberGetter public native @Cast("char*") BytePointer function_name(); + /** Line number the assertion was at */ + public native int line_number(); public native DeviceAssertionData line_number(int setter); + /** Number uniquely identifying the kernel launch that triggered the assertion */ + public native @Cast("uint32_t") int caller(); public native DeviceAssertionData caller(int setter); + /** block_id of the thread that failed the assertion */ + public native int block_id(int i); public native DeviceAssertionData block_id(int i, int setter); + @MemberGetter public native IntPointer block_id(); + /** third_id of the thread that failed the assertion */ + public native int thread_id(int i); public native DeviceAssertionData thread_id(int i, int setter); + @MemberGetter public native IntPointer thread_id(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java new file mode 100644 index 00000000000..87a0fcbf46f --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsData.java @@ -0,0 +1,50 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +/** Used to hold assertions generated by the 
device. + * Held in managed memory and accessed by both the CPU and the GPU. */ +@Namespace("c10::cuda") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class DeviceAssertionsData extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public DeviceAssertionsData() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public DeviceAssertionsData(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DeviceAssertionsData(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public DeviceAssertionsData position(long position) { + return (DeviceAssertionsData)super.position(position); + } + @Override public DeviceAssertionsData getPointer(long i) { + return new DeviceAssertionsData((Pointer)this).offsetAddress(i); + } + + /** Total number of assertions found; a subset of these will be recorded + * in {@code assertions} */ + public native int assertion_count(); public native DeviceAssertionsData assertion_count(int setter); + /** An array of assertions that will be written to in a race-free manner */ + public native @ByRef DeviceAssertionData assertions(int i); public native DeviceAssertionsData assertions(int i, DeviceAssertionData setter); + @MemberGetter public native DeviceAssertionData assertions(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java new file mode 100644 index 00000000000..4b52d62a018 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVector.java @@ -0,0 +1,93 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + +@Name("std::vector<c10::cuda::DeviceAssertionsData>") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class DeviceAssertionsDataVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DeviceAssertionsDataVector(Pointer p) { super(p); } + public DeviceAssertionsDataVector(DeviceAssertionsData value) { this(1); put(0, value); } + public DeviceAssertionsDataVector(DeviceAssertionsData ... 
array) { this(array.length); put(array); } + public DeviceAssertionsDataVector() { allocate(); } + public DeviceAssertionsDataVector(long n) { allocate(n); } + private native void allocate(); + private native void allocate(@Cast("size_t") long n); + public native @Name("operator =") @ByRef DeviceAssertionsDataVector put(@ByRef DeviceAssertionsDataVector x); + + public boolean empty() { return size() == 0; } + public native long size(); + public void clear() { resize(0); } + public native void resize(@Cast("size_t") long n); + + public DeviceAssertionsData front() { return get(0); } + public DeviceAssertionsData back() { return get(size() - 1); } + @Index(function = "at") public native @ByRef DeviceAssertionsData get(@Cast("size_t") long i); + public native DeviceAssertionsDataVector put(@Cast("size_t") long i, DeviceAssertionsData value); + + public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef DeviceAssertionsData value); + public native @ByVal Iterator erase(@ByVal Iterator pos); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const DeviceAssertionsData get(); + } + + public DeviceAssertionsData[] get() { + DeviceAssertionsData[] array = new DeviceAssertionsData[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE]; + for (int i = 0; i < array.length; i++) { + array[i] = get(i); + } + return array; + } + @Override public String toString() { + return java.util.Arrays.toString(get()); + } + + public DeviceAssertionsData pop_back() { + long size = size(); + DeviceAssertionsData value = get(size - 1); + resize(size - 1); + return value; + } + public DeviceAssertionsDataVector push_back(DeviceAssertionsData value) { + long size = size(); + resize(size + 1); + return put(size, value); + } + public DeviceAssertionsDataVector put(DeviceAssertionsData value) { + if (size() != 1) { resize(1); } + return put(0, value); + } + public DeviceAssertionsDataVector put(DeviceAssertionsData ... 
array) { + if (size() != array.length) { resize(array.length); } + for (int i = 0; i < array.length; i++) { + put(i, array[i]); + } + return this; + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java new file mode 100644 index 00000000000..bb384e719ea --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java @@ -0,0 +1,43 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + +@NoOffset @Name("std::pair<std::vector<c10::cuda::DeviceAssertionsData>,std::vector<c10::cuda::CUDAKernelLaunchInfo> >") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair(Pointer p) { super(p); } + public DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair(DeviceAssertionsDataVector firstValue, CUDAKernelLaunchInfoVector secondValue) { this(); put(firstValue, secondValue); } + public DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair put(@ByRef DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair x); + + + @MemberGetter public native @ByRef DeviceAssertionsDataVector first(); public native DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair first(DeviceAssertionsDataVector first); + @MemberGetter public native @ByRef CUDAKernelLaunchInfoVector second(); public native DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair second(CUDAKernelLaunchInfoVector second); + + public DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair put(DeviceAssertionsDataVector firstValue, CUDAKernelLaunchInfoVector secondValue) { + first(firstValue); + second(secondValue); + return this; + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java new file mode 100644 index 00000000000..d28d4c120ca --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DropoutDescriptor.java @@ -0,0 +1,53 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import 
org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("at::native") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class DropoutDescriptor extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public DropoutDescriptor() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public DropoutDescriptor(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public DropoutDescriptor(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public DropoutDescriptor position(long position) { + return (DropoutDescriptor)super.position(position); + } + @Override public DropoutDescriptor getPointer(long i) { + return new DropoutDescriptor((Pointer)this).offsetAddress(i); + } + + public native @ByRef Tensor state(); public native DropoutDescriptor state(Tensor setter); + + // Initialize a dropout descriptor's RNG state. + // WARNING: This function is very expensive, avoid calling this function! + public native void initialize_rng(@Cast("cudnnHandle_t") Pointer handle, float dropout, long seed, @Const @ByRef TensorOptions options); + + // Restore a dropout descriptor given a dropout probability and existing RNG state. + public native void set(@Cast("cudnnHandle_t") Pointer handle, float dropout, @ByVal Tensor state_); + + // Restore a dropout descriptor corresponding to no dropout + public native void set_no_dropout(@Cast("cudnnHandle_t") Pointer handle); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java new file mode 100644 index 00000000000..355cfec9694 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/FilterDescriptor.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("at::native") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class FilterDescriptor extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public FilterDescriptor() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
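Since initialize_rng() above is documented as very expensive, a plausible pattern (a sketch only; `handle` is an assumed cudnnHandle_t Pointer and `options` assumed CUDA TensorOptions) is to pay that cost once and restore other descriptors from the saved state tensor:

    DropoutDescriptor first = new DropoutDescriptor();
    first.initialize_rng(handle, 0.5f, 42L, options); // expensive, do once
    Tensor state = first.state();                     // reusable RNG state
    DropoutDescriptor again = new DropoutDescriptor();
    again.set(handle, 0.5f, state);                   // cheap restore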
*/ + public FilterDescriptor(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public FilterDescriptor(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public FilterDescriptor position(long position) { + return (FilterDescriptor)super.position(position); + } + @Override public FilterDescriptor getPointer(long i) { + return new FilterDescriptor((Pointer)this).offsetAddress(i); + } + + public native void set(@Const @ByRef Tensor t, @Cast("int64_t") long pad/*=0*/); + public native void set(@Const @ByRef Tensor t); + + public native void set(@Const @ByRef Tensor t, @Const @ByVal MemoryFormat memory_format, @Cast("int64_t") long pad/*=0*/); + public native void set(@Const @ByRef Tensor t, @Const @ByVal MemoryFormat memory_format); + + public native void print(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAGuard.java new file mode 100644 index 00000000000..4a7897fa10c --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAGuard.java @@ -0,0 +1,90 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +/** A variant of OptionalDeviceGuard that is specialized for CUDA. See + * CUDAGuard for when you can use this. */ +@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class OptionalCUDAGuard extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public OptionalCUDAGuard(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public OptionalCUDAGuard(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public OptionalCUDAGuard position(long position) { + return (OptionalCUDAGuard)super.position(position); + } + @Override public OptionalCUDAGuard getPointer(long i) { + return new OptionalCUDAGuard((Pointer)this).offsetAddress(i); + } + + /** Create an uninitialized OptionalCUDAGuard. */ + public OptionalCUDAGuard() { super((Pointer)null); allocate(); } + private native void allocate(); + + /** Set the current CUDA device to the passed Device, if it is not nullopt. 
*/ + public OptionalCUDAGuard(@ByVal DeviceOptional device_opt) { super((Pointer)null); allocate(device_opt); } + private native void allocate(@ByVal DeviceOptional device_opt); + + /** Set the current CUDA device to the passed device index, if it is not + * nullopt */ + public OptionalCUDAGuard(@ByVal ByteOptional device_index_opt) { super((Pointer)null); allocate(device_index_opt); } + private native void allocate(@ByVal ByteOptional device_index_opt); + + // Copy is not allowed + + + + // See Note [Move construction for RAII guards is tricky] + + + // See Note [Move assignment for RAII guards is tricky] + + + /** Sets the CUDA device to the given device, initializing the guard if it + * is not already initialized. Errors if the given device is not a CUDA + * device. */ + public native void set_device(@ByVal Device device); + + /** Sets the CUDA device to the given device, initializing the guard if it is + * not already initialized. Errors if the given device is not a CUDA device. + * (This method is provided for uniformity with OptionalDeviceGuard). */ + public native void reset_device(@ByVal Device device); + + /** Sets the CUDA device to the given device index, initializing the guard if + * it is not already initialized. */ + public native void set_index(byte device_index); + + /** Returns the device that was set immediately prior to initialization of the + * guard, or nullopt if the guard is uninitialized. */ + public native @ByVal DeviceOptional original_device(); + + /** Returns the most recent device that was set using this device guard, + * either from construction, or via set_device, if the guard is initialized, + * or nullopt if the guard is uninitialized. */ + public native @ByVal DeviceOptional current_device(); + + /** Restore the original CUDA device, resetting this guard to uninitialized + * state. */ + public native void reset(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAStreamGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAStreamGuard.java new file mode 100644 index 00000000000..32472e73e3b --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/OptionalCUDAStreamGuard.java @@ -0,0 +1,86 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +/** A variant of OptionalStreamGuard that is specialized for CUDA. See + * CUDAGuard for when you can use this. */ +@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class OptionalCUDAStreamGuard extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public OptionalCUDAStreamGuard(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
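A minimal sketch of the optional guard above: it starts uninitialized, initializes on first use, and reset() restores the original device (device index 1 is an arbitrary assumption):

    OptionalCUDAGuard guard = new OptionalCUDAGuard(); // uninitialized
    guard.set_index((byte) 1);                         // device 1 becomes current
    // ... run work on device 1 ...
    guard.reset();                                     // restore original device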
*/ + public OptionalCUDAStreamGuard(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public OptionalCUDAStreamGuard position(long position) { + return (OptionalCUDAStreamGuard)super.position(position); + } + @Override public OptionalCUDAStreamGuard getPointer(long i) { + return new OptionalCUDAStreamGuard((Pointer)this).offsetAddress(i); + } + + /** Create an uninitialized guard. */ + public OptionalCUDAStreamGuard() { super((Pointer)null); allocate(); } + private native void allocate(); + + /** Set the current CUDA device to the device associated with the passed + * stream, and set the current CUDA stream on that device to the passed + * stream. Errors if the Stream is not a CUDA stream. */ + public OptionalCUDAStreamGuard(@ByVal Stream stream) { super((Pointer)null); allocate(stream); } + private native void allocate(@ByVal Stream stream); + + /** Set the current device to the device associated with the passed stream, + * and set the current stream on that device to the passed stream, + * if the passed stream is not nullopt. */ + public OptionalCUDAStreamGuard(@ByVal StreamOptional stream_opt) { super((Pointer)null); allocate(stream_opt); } + private native void allocate(@ByVal StreamOptional stream_opt); + + /** Copy is disallowed */ + + + + // See Note [Move construction for RAII guards is tricky] + + + // See Note [Move assignment for RAII guards is tricky] + + + /** Resets the currently set CUDA stream to the original stream and + * the currently set device to the original device. Then, + * set the current device to the device associated with the passed stream, + * and set the current stream on that device to the passed stream. + * Initializes the guard if it was not previously initialized. */ + public native void reset_stream(@ByVal Stream stream); + + /** Returns the CUDA stream that was set at the time the guard was most + * recently initialized, or nullopt if the guard is uninitialized. */ + public native @ByVal CUDAStreamOptional original_stream(); + + /** Returns the most recent CUDA stream that was set using this stream guard, + * either from construction, or via reset_stream, if the guard is + * initialized, or nullopt if the guard is uninitialized. */ + public native @ByVal CUDAStreamOptional current_stream(); + + /** Restore the original CUDA device and stream, resetting this guard to + * uninitialized state. 
*/ + public native void reset(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java new file mode 100644 index 00000000000..5ac9cefd560 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/RNNDescriptor.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("at::native") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class RNNDescriptor extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public RNNDescriptor() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public RNNDescriptor(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public RNNDescriptor(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public RNNDescriptor position(long position) { + return (RNNDescriptor)super.position(position); + } + @Override public RNNDescriptor getPointer(long i) { + return new RNNDescriptor((Pointer)this).offsetAddress(i); + } + + + public native void set(@Cast("cudnnHandle_t") Pointer handle, int hidden_size, int proj_size, int num_layers, @ByRef(true) DropoutDescriptor dropout_desc, + @Cast("cudnnRNNInputMode_t") int input_mode, @Cast("cudnnDirectionMode_t") int bidirectional, + @Cast("cudnnRNNMode_t") int mode, @Cast("cudnnDataType_t") int datatype, @Cast("cudnnDataType_t") int input_type, @Cast("cudnnRNNAlgo_t") int algo, @Cast("bool") boolean allow_tf32); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java new file mode 100644 index 00000000000..1e80e1ae9e9 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SpatialTransformerDescriptor.java @@ -0,0 +1,45 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static 
org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("at::native") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class SpatialTransformerDescriptor extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public SpatialTransformerDescriptor() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public SpatialTransformerDescriptor(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SpatialTransformerDescriptor(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public SpatialTransformerDescriptor position(long position) { + return (SpatialTransformerDescriptor)super.position(position); + } + @Override public SpatialTransformerDescriptor getPointer(long i) { + return new SpatialTransformerDescriptor((Pointer)this).offsetAddress(i); + } + + public native void set(@Cast("cudnnDataType_t") int dataType, int dim, IntPointer size); + public native void set(@Cast("cudnnDataType_t") int dataType, int dim, IntBuffer size); + public native void set(@Cast("cudnnDataType_t") int dataType, int dim, int[] size); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java new file mode 100644 index 00000000000..c80f0a8137b --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/TensorDescriptor.java @@ -0,0 +1,77 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// A generic class for wrapping cuDNN descriptor types. All you need +// is to give the underlying type the Descriptor_t points to (usually, +// if it's cudnnTensorDescriptor_t it points to cudnnTensorStruct), +// the constructor and the destructor. Subclasses are responsible +// for defining a set() function to actually set the descriptor. +// +// Descriptors default construct to a nullptr, and have a descriptor +// initialized the first time you call set() or any other initializing +// function. + +@Namespace("at::native") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class TensorDescriptor extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorDescriptor(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public TensorDescriptor(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public TensorDescriptor position(long position) { + return (TensorDescriptor)super.position(position); + } + @Override public TensorDescriptor getPointer(long i) { + return new TensorDescriptor((Pointer)this).offsetAddress(i); + } + + public TensorDescriptor() { super((Pointer)null); allocate(); } + private native void allocate(); + public TensorDescriptor(@Const @ByRef Tensor t, @Cast("size_t") long pad/*=0*/) { super((Pointer)null); allocate(t, pad); } + private native void allocate(@Const @ByRef Tensor t, @Cast("size_t") long pad/*=0*/); + public TensorDescriptor(@Const @ByRef Tensor t) { super((Pointer)null); allocate(t); } + private native void allocate(@Const @ByRef Tensor t); + + // Note [CuDNN broadcast padding] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // pad specifies the minimum dimensionality of the tensor descriptor + // we produce (it doesn't have anything to do with, e.g., convolution + // padding). If 't' is lower-dimensional than 'pad', the remaining + // dimensions (on the right) are padded with ones. This doesn't + // affect the underlying data layout. This is particularly useful for + // dealing with a peculiarity of the CuDNN API, which is that broadcasting in CuDNN is + // done in two steps: first, the client code is expected to pad out + // (the dimensions) input tensors to be the same dimension as the + // target broadcast, and then second, CuDNN takes care of actually + // broadcasting size 1 dimensions. + + public native void set(@Const @ByRef Tensor t, @Cast("size_t") long pad/*=0*/); + public native void set(@Const @ByRef Tensor t); + public native void set(@Const @ByRef Tensor t, @ByVal MemoryFormat memory_format, @Cast("size_t") long pad/*=0*/); + public native void set(@Const @ByRef Tensor t, @ByVal MemoryFormat memory_format); + public native void set(@Cast("cudnnDataType_t") int dataType, @ByVal LongArrayRef sizes, @ByVal LongArrayRef strides, @Cast("size_t") long pad/*=0*/); + public native void set(@Cast("cudnnDataType_t") int dataType, @ByVal LongArrayRef sizes, @ByVal LongArrayRef strides); + + public native void print(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/fibonacci_hash_policy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/fibonacci_hash_policy.java new file mode 100644 index 00000000000..d2ccf7a4556 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/fibonacci_hash_policy.java @@ -0,0 +1,47 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("ska_ordered") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class fibonacci_hash_policy extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public fibonacci_hash_policy() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
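A short sketch of Note [CuDNN broadcast padding] above, assuming `t` is an existing 2-D Tensor; pad=4 yields a 4-D descriptor with trailing size-1 dimensions:

    TensorDescriptor desc = new TensorDescriptor();
    desc.set(t, 4);   // a [N, C] tensor is described as [N, C, 1, 1]
    desc.print();     // debug dump of the underlying cudnn descriptor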
*/ + public fibonacci_hash_policy(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public fibonacci_hash_policy(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public fibonacci_hash_policy position(long position) { + return (fibonacci_hash_policy)super.position(position); + } + @Override public fibonacci_hash_policy getPointer(long i) { + return new fibonacci_hash_policy((Pointer)this).offsetAddress(i); + } + + public native @Cast("uint64_t") long index_for_hash(@Cast("uint64_t") long hash, @Cast("uint64_t") long arg1); + public native @Cast("uint64_t") long keep_in_range(@Cast("uint64_t") long index, @Cast("uint64_t") long num_slots_minus_one); + + public native byte next_size_over(@Cast("uint64_t*") @ByRef LongPointer size); + public native byte next_size_over(@Cast("uint64_t*") @ByRef LongBuffer size); + public native byte next_size_over(@Cast("uint64_t*") @ByRef long[] size); + public native void commit(byte shift_); + public native void reset(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/getTypePtr_.java b/pytorch/src/gen/java/org/bytedeco/pytorch/getTypePtr_.java deleted file mode 100644 index 9bf80d07f7d..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/getTypePtr_.java +++ /dev/null @@ -1,38 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Name("c10::detail::getTypePtr_") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class getTypePtr_ extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public getTypePtr_() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public getTypePtr_(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public getTypePtr_(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public getTypePtr_ position(long position) { - return (getTypePtr_)super.position(position); - } - @Override public getTypePtr_ getPointer(long i) { - return new getTypePtr_((Pointer)this).offsetAddress(i); - } - - public static native @ByVal IValue call(); -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index 35ddeef88d2..cac335c4717 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch.global; @@ -6,7 +6,9 @@ import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -30,6 +32,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../LongOptional.java +// Targeting ../FloatOptional.java + + // Targeting ../DoubleOptional.java @@ -132,9 +137,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../SymIntOptional.java -// Targeting ../SymIntArrayRefOptional.java - - // Targeting ../IValueOptional.java @@ -153,7 +155,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../TensorOptional.java -// Targeting ../TensorListOptional.java +// Targeting ../TensorArrayRefOptional.java // Targeting ../ThreadLocalStateOptional.java @@ -195,7 +197,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../DoubleExpandingArrayOptional.java -// Targeting ../StringSizeTSizeTTupleOptional.java +// Targeting ../T_StringSizeTSizeT_TOptional.java + + +// Targeting ../T_TypePtrLong_TOptional.java // Targeting ../ExampleVectorOptional.java @@ -207,61 +212,64 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../BatchSizeOptional.java -// Targeting ../TensorTensorOptional.java +// Targeting ../WeakStorageVectorOptional.java + +// Targeting ../T_TensorTensor_TOptional.java -// Targeting ../NonlinearityType.java + +// Targeting ../Nonlinearity.java // Targeting ../FanModeType.java -// Targeting ../conv_padding_mode_t.java +// Targeting ../ConvPaddingMode.java -// Targeting ../conv_padding_t1.java +// Targeting ../Conv1dPadding.java -// Targeting ../conv_padding_t2.java +// Targeting ../Conv2dPadding.java -// Targeting ../conv_padding_t3.java +// Targeting ../Conv3dPadding.java // Targeting ../EmbeddingBagMode.java -// Targeting ../pad_mode_t.java +// Targeting ../PaddingMode.java -// Targeting ../loss_reduction_t.java +// Targeting ../LossReduction.java -// Targeting ../kldiv_loss_reduction_t.java +// Targeting ../KLDivLossReduction.java -// Targeting ../grid_sample_mode_t.java +// Targeting ../GridSampleMode.java -// Targeting ../grid_sample_padding_mode_t.java +// Targeting ../GridSamplePaddingMode.java -// Targeting ../rnn_options_base_mode_t.java +// Targeting ../RNNBaseMode.java -// Targeting ../rnn_nonlinearity_t.java +// Targeting ../RNNNonlinearity.java -// Targeting ../upsample_mode_t.java +// Targeting ../UpsampleMode.java -// Targeting 
../interpolate_mode_t.java +// Targeting ../InterpolateMode.java -// Targeting ../transformer_activation_t.java +// Targeting ../TensorDeque.java -// Targeting ../TensorDeque.java +// Targeting ../RecordFunctionHandleIntList.java // Targeting ../StringStringMap.java @@ -276,6 +284,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringTensorMap.java +// Targeting ../ActivityTypeSet.java + + // Targeting ../RecordFunctionCallbackHandleVector.java @@ -288,9 +299,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../FunctionPostHookVector.java -// Targeting ../TokenTrieVector.java - - // Targeting ../SavedVariableVector.java @@ -309,6 +317,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../OptimizerParamGroupVector.java +// Targeting ../FunctionSchemaVector.java + + +// Targeting ../StringTensorDictItemVector.java + + +// Targeting ../StringModuleDictItemVector.java + + +// Targeting ../StringAnyModuleDictItemVector.java + + +// Targeting ../StringSharedModuleDictItemVector.java + + +// Targeting ../WeakStorageVector.java + + // Targeting ../Bool2Vector.java @@ -333,9 +359,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringLongVector.java -// Targeting ../ArgumentVector.java - - // Targeting ../IValueVector.java @@ -348,16 +371,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../SymbolVector.java -// Targeting ../SymIntVector.java - - // Targeting ../LongOptionalVector.java // Targeting ../IValueOptionalVector.java -// Targeting ../ClassTypeVector.java +// Targeting ../SharedClassTypeVector.java // Targeting ../TypeVector.java @@ -387,7 +407,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../OperatorOptionalVector.java -// Targeting ../FunctionPreVector.java +// Targeting ../SharedFunctionPreVector.java // Targeting ../FunctionVector.java @@ -402,15 +422,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../ResolverVector.java -// Targeting ../SugaredValueVector.java - - // Targeting ../StackEntryVector.java -// Targeting ../BlockVector.java - - // Targeting ../ValueVector.java @@ -429,21 +443,27 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../SharedAnyModuleVector.java -// Targeting ../StringTensorPairVector.java +// Targeting ../StringTensorVector.java -// Targeting ../StringModulePairVector.java +// Targeting ../StringModuleVector.java -// Targeting ../StringAnyModulePairVector.java +// Targeting ../StringAnyModuleVector.java -// Targeting ../StringSharedModulePairVector.java +// Targeting ../StringSharedModuleVector.java // Targeting ../FusionStrategy.java +// Targeting ../SymIntVector.java + + +// Targeting ../SharedSugaredValueVector.java + + // Targeting ../ExampleVector.java @@ -465,70 +485,94 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringSharedModulePair.java -// Targeting ../TensorTuple.java +// Targeting ../RecordFunctionHandleIntPair.java + + +// Targeting ../SizeTMatchedSchemaPair.java + + +// Targeting ../T_DataPtrSizeT_T.java + + +// Targeting ../T_IntInt_T.java + +// Targeting ../T_LongLong_T.java -// Targeting ../TensorTensorTuple.java +// Targeting ../T_DoubleLong_T.java -// Targeting ../TensorTensorTensorTuple.java +// Targeting ../T_TensorTensor_T.java -// Targeting ../TensorTensorTensorTensorTuple.java +// Targeting ../T_TensorTensorTensor_T.java -// 
Targeting ../TensorTensorTensorTensorTensorTuple.java +// Targeting ../T_TensorTensorTensorTensor_T.java -// Targeting ../TensorTensorTensorTensorVectorTuple.java +// Targeting ../T_TensorTensorTensorTensorTensor_T.java -// Targeting ../TensorTensorTensorTensorLongTuple.java +// Targeting ../T_TensorTensorTensorTensorTensorTensor_T.java -// Targeting ../TensorTensorLongLongTensorTuple.java +// Targeting ../T_TensorTensorTensorTensorTensorTensorTensor_T.java -// Targeting ../TensorTensorTensorTensorTensorTensorTuple.java +// Targeting ../T_TensorTensorTensorTensorVector_T.java -// Targeting ../TensorTensorTensorTensorTensorTensorTensorTuple.java +// Targeting ../T_TensorTensorTensorTensorLong_T.java -// Targeting ../TensorTensorDoubleLongTuple.java +// Targeting ../T_TensorTensorDoubleLong_T.java -// Targeting ../TensorTensorTensorTupleTuple.java +// Targeting ../T_TensorT_TensorTensor_T_T.java -// Targeting ../TensorMaybeOwnedTensorMaybeOwnedTuple.java +// Targeting ../T_TensorMaybeOwnedTensorMaybeOwned_T.java -// Targeting ../TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple.java +// Targeting ../T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T.java -// Targeting ../PackedSequenceTensorTuple.java +// Targeting ../T_PackedSequenceTensor_T.java -// Targeting ../PackedSequenceTensorTensorTupleTuple.java +// Targeting ../T_PackedSequenceT_TensorTensor_T_T.java -// Targeting ../StringSizeTSizeTTuple.java +// Targeting ../T_StringSizeTSizeT_T.java -// Targeting ../TensorTensorVectorTuple.java +// Targeting ../T_StringLong_T.java -// Targeting ../TensorVectorTensorTuple.java +// Targeting ../T_TensorTensorVector_T.java -// Targeting ../TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple.java +// Targeting ../T_TensorVectorTensor_T.java -// Targeting ../TensorTensorVectorTensorVectorTuple.java +// Targeting ../T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T.java -// Targeting ../IntFunctionPreHookMap.java + +// Targeting ../T_TensorTensorVectorTensorVector_T.java + + +// Targeting ../T_TensorTensorLongLongTensor_T.java + + +// Targeting ../T_TensorTensorTensorTensorsLongLongLongLongTensor_T.java + + +// Targeting ../T_TypePtrLong_T.java + + +// Targeting ../NodeIntMap.java // Targeting ../HashAliasedIValueMap.java @@ -561,10 +605,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringLongStringMapMap.java +// Targeting ../ValueValueMap.java + + // Targeting ../ArgumentSpecExecutionPlanMap.java -// Targeting ../ValueValueMap.java +// Targeting ../TreeRefStringMap.java // Targeting ../StringSet.java @@ -582,6 +629,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../RecordScopeSet.java +// Targeting ../NodeSet.java + + +// Targeting ../StreamSet.java + + +// Targeting ../RecordScopeSet.java + + +// Parsed from torch/csrc/utils/python_stub.h + +// #pragma once + + // Parsed from c10/macros/cmake_macros.h // #ifndef C10_MACROS_CMAKE_MACROS_H_ @@ -593,7 +654,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #define C10_BUILD_SHARED_LIBS /* #undef C10_USE_GLOG */ /* #undef C10_USE_GFLAGS */ -// #define C10_USE_NUMA +/* #undef C10_USE_NUMA */ /* #undef C10_USE_MSVC_STATIC_RUNTIME */ // #endif // C10_MACROS_CMAKE_MACROS_H_ @@ -749,6 +810,19 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif // C10_MACROS_MACROS_H_ +// Parsed from torch/csrc/Export.h + +// #pragma once + +// #include + +// #ifdef THP_BUILD_MAIN_LIB +// #define TORCH_PYTHON_API 
C10_EXPORT +// #else +// #define TORCH_PYTHON_API C10_IMPORT +// #endif + + // Parsed from c10/macros/Macros.h // #ifndef C10_MACROS_MACROS_H_ @@ -1168,673 +1242,390 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif // C10_MACROS_MACROS_H_ -// Parsed from c10/util/IdWrapper.h +// Parsed from c10/core/DeviceType.h // #pragma once +// This is directly synchronized with caffe2/proto/caffe2.proto, but +// doesn't require me to figure out how to get Protobuf headers into +// ATen/core (which would require a lot more build system hacking.) +// If you modify me, keep me synchronized with that file. + // #include -// #include + // #include -// #include -// Targeting ../TypeIdentifierIdWrapper.java +// #include + +// These contains all device types that also have a BackendComponent +// and therefore participate in per-backend functionality dispatch keys. +// This is most backends except PrivateUse2 and PrivateUse3 +// #define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) +// _(CPU, extra) +// _(CUDA, extra) +// _(HIP, extra) +// _(XLA, extra) +// _(MPS, extra) +// _(IPU, extra) +// _(XPU, extra) +// _(HPU, extra) +// _(VE, extra) +// _(Lazy, extra) +// _(Meta, extra) +// _(MTIA, extra) +// _(PrivateUse1, extra) +@Namespace("c10") public enum DeviceType { + CPU((byte)(0)), + CUDA((byte)(1)), // CUDA. + MKLDNN((byte)(2)), // Reserved for explicit MKLDNN + OPENGL((byte)(3)), // OpenGL + OPENCL((byte)(4)), // OpenCL + IDEEP((byte)(5)), // IDEEP. + HIP((byte)(6)), // AMD HIP + FPGA((byte)(7)), // FPGA + ORT((byte)(8)), // ONNX Runtime / Microsoft + XLA((byte)(9)), // XLA / TPU + Vulkan((byte)(10)), // Vulkan + Metal((byte)(11)), // Metal + XPU((byte)(12)), // XPU + MPS((byte)(13)), // MPS + Meta((byte)(14)), // Meta (tensors with no data) + HPU((byte)(15)), // HPU / HABANA + VE((byte)(16)), // SX-Aurora / NEC + Lazy((byte)(17)), // Lazy Tensors + IPU((byte)(18)), // Graphcore IPU + MTIA((byte)(19)), // Meta training and inference devices + PrivateUse1((byte)(20)), // PrivateUse1 device + // NB: If you add more devices: + // - Change the implementations of DeviceTypeName and isValidDeviceType + // in DeviceType.cpp + // - Change the number below + COMPILE_TIME_MAX_DEVICE_TYPES((byte)(21)); + public final byte value; + private DeviceType(byte v) { this.value = v; } + private DeviceType(DeviceType e) { this.value = e.value; } + public DeviceType intern() { for (DeviceType e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} - // namespace c10 +@Namespace("c10") @MemberGetter public static native DeviceType kCPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kCUDA(); +@Namespace("c10") @MemberGetter public static native DeviceType kHIP(); +@Namespace("c10") @MemberGetter public static native DeviceType kFPGA(); +@Namespace("c10") @MemberGetter public static native DeviceType kORT(); +@Namespace("c10") @MemberGetter public static native DeviceType kXLA(); +@Namespace("c10") @MemberGetter public static native DeviceType kMPS(); +@Namespace("c10") @MemberGetter public static native DeviceType kMeta(); +@Namespace("c10") @MemberGetter public static native DeviceType kVulkan(); +@Namespace("c10") @MemberGetter public static native DeviceType kMetal(); +@Namespace("c10") @MemberGetter public static native DeviceType kXPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kHPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kVE(); +@Namespace("c10") 
@MemberGetter public static native DeviceType kLazy(); +@Namespace("c10") @MemberGetter public static native DeviceType kIPU(); +@Namespace("c10") @MemberGetter public static native DeviceType kMTIA(); +@Namespace("c10") @MemberGetter public static native DeviceType kPrivateUse1(); -// #define C10_DEFINE_HASH_FOR_IDWRAPPER(ClassName) -// namespace std { -// template <> -// struct hash { -// size_t operator()(ClassName x) const { -// return hash_value(x); -// } -// }; -// } +// define explicit int constant +@Namespace("c10") @MemberGetter public static native int COMPILE_TIME_MAX_DEVICE_TYPES(); +@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d, @Cast("bool") boolean lower_case/*=false*/); +@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d); +@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d, @Cast("bool") boolean lower_case/*=false*/); +@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d); -// Parsed from c10/util/MaybeOwned.h +@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(DeviceType d); +@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(@Cast("c10::DeviceType") byte d); -// #pragma once +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, DeviceType type); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Cast("c10::DeviceType") byte type); -// #include -// #include -// #include +@Namespace("c10") public static native void register_privateuse1_backend(@StdString BytePointer backend_name); +@Namespace("c10") public static native void register_privateuse1_backend(@StdString String backend_name); +@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(@Cast("bool") boolean lower_case/*=true*/); +@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(); -// #include + // namespace c10 + // namespace std -/** MaybeOwnedTraits describes how to borrow from T. Here is how we - * can implement borrowing from an arbitrary type T using a raw - * pointer to const: */ -/** It is possible to eliminate the extra layer of indirection for - * borrows for some types that we control. For examples, see - * intrusive_ptr.h and TensorBody.h. */ -// Explicitly enable MaybeOwned>, rather than allowing -// MaybeOwned to be used for any type right away. -// Targeting ../TensorMaybeOwned.java +// Parsed from c10/util/Deprecated.h +// #pragma once +/** + * This file provides portable macros for marking declarations + * as deprecated. You should generally use C10_DEPRECATED, + * except when marking 'using' declarations as deprecated, + * in which case you should use C10_DEFINE_DEPRECATED_USING + * (due to portability concerns). + */ - // namespace c10 +// Sample usage: +// +// C10_DEPRECATED void bad_func(); +// struct C10_DEPRECATED BadStruct { +// ... +// }; +// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses +// the "__declspec(deprecated)" implementation and not the C++14 +// "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on +// MSVC, but ran into issues with some older MSVC versions. 
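// [Editor's note: the block below is an illustrative addition, not generated
// code and not part of this patch. Before the deprecation macros continue, it
// sketches how the DeviceType helpers bound a little further up (kCUDA,
// DeviceTypeName, isValidDeviceType, get_privateuse1_backend) might be called
// from Java. Every method it uses is declared above; the demo class itself and
// the expected outputs in its comments are assumptions, not verified behavior.]
import org.bytedeco.pytorch.global.torch.DeviceType;
import static org.bytedeco.pytorch.global.torch.*;

public class DeviceTypeDemo {
    public static void main(String[] args) {
        DeviceType d = kCUDA();                   // @MemberGetter constant bound above
        // DeviceTypeName returns an @StdString BytePointer; getString() reads it.
        System.out.println(DeviceTypeName(d, /*lower_case=*/true).getString()); // presumably "cuda"
        System.out.println(isValidDeviceType(d)); // true
        // Name under which the PrivateUse1 slot is currently exposed:
        System.out.println(get_privateuse1_backend().getString());
    }
}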
+// #if (defined(__cplusplus) && __cplusplus >= 201402L) +// #define C10_DEPRECATED [[deprecated]] +// #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]] +// #elif defined(__GNUC__) +// #define C10_DEPRECATED __attribute__((deprecated)) +// TODO Is there some way to implement this? +// #define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated)) -// Parsed from c10/util/typeid.h +// #elif defined(_MSC_VER) +// #else +// #warning "You need to implement C10_DEPRECATED for this compiler" +// #define C10_DEPRECATED +// #endif -// #pragma once +// Sample usage: +// +// C10_DEFINE_DEPRECATED_USING(BadType, int) +// +// which is the portable version of +// +// using BadType [[deprecated]] = int; -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #ifdef __GXX_RTTI -// #include +// technically [[deprecated]] syntax is from c++14 standard, but it works in +// many compilers. +// #if defined(__has_cpp_attribute) +// #if __has_cpp_attribute(deprecated) && !defined(__CUDACC__) +// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) +// using TypeName [[deprecated]] = TypeThingy; +// #endif // #endif -// #include +// #if defined(_MSC_VER) +// #endif -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__) +// nvcc has a bug where it doesn't understand __attribute__((deprecated)) +// declarations even when the host compiler supports it. We'll only use this gcc +// attribute when not cuda, and when using a GCC compiler that doesn't support +// the c++14 syntax we checked for above (available in __GNUC__ >= 5) +// #if !defined(__CUDACC__) +// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) +// using TypeName __attribute__((deprecated)) = TypeThingy; +// #else +// using cuda + gcc < 5, neither deprecated syntax is available so turning off. +// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) +// using TypeName = TypeThingy; +// #endif +// #endif -// #include -// #include +// #if !defined(C10_DEFINE_DEPRECATED_USING) +// #warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler" +// #define C10_DEFINE_DEPRECATED_USING +// #endif + + +// Parsed from c10/util/reverse_iterator.h + +// #pragma once + +/** + * A constexpr std::reverse_iterator for C++11. + * Implementation taken from libstdc++, + * https://raw.githubusercontent.com/gcc-mirror/gcc/gcc-9_2_0-release/libstdc%2B%2B-v3/include/bits/stl_iterator.h + * adapted to our code base and constexpr'ified. + */ + +// Copyright (C) 2001-2019 Free Software Foundation, Inc. +// +// This file is part of the GNU ISO C++ Library. This library is free +// software; you can redistribute it and/or modify it under the +// terms of the GNU General Public License as published by the +// Free Software Foundation; either version 3, or (at your option) +// any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. 
+ +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . /* - * TypeIdentifier is a small type containing an id. - * Types must be registered using CAFFE_DECLARE_KNOWN_TYPE() (in their header) - * and CAFFE_DEFINE_KNOWN_TYPE() (in their .cpp file) for them to have a type - * id. If a type is registered, you can also create an object containing meta - * data like constructor, destructor, stringified name, ... about the type by - * calling TypeMeta::Make. This returns a TypeMeta() object, which is - * basically just a pointer to the type information, so it's cheap to pass - * around. + * + * Copyright (c) 1994 + * Hewlett-Packard Company + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Hewlett-Packard Company makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. + * + * + * Copyright (c) 1996-1998 + * Silicon Graphics Computer Systems, Inc. + * + * Permission to use, copy, modify, distribute and sell this software + * and its documentation for any purpose is hereby granted without fee, + * provided that the above copyright notice appear in all copies and + * that both that copyright notice and this permission notice appear + * in supporting documentation. Silicon Graphics makes no + * representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. */ -// TODO: This file is still in the caffe2 namespace, despite living -// in the ATen directory. This is because the macro -// CAFFE_KNOWN_TYPE (and CAFFE_DECLARE_KNOWN_TYPE) defines a template -// specialization, which relies -// on the namespace of TypeMeta matching the namespace where the macro is -// called. This requires us to fix all of the call-sites, which I want to do -// later. So the namespace is not fixed at the moment. +// #include +// #include -// Make at::Half a fundamental type. 
- // namespace guts // namespace c10 -// Targeting ../TypeIdentifier.java +// Parsed from c10/util/string_view.h -// Allow usage in std::map / std::set -// TODO Disallow this and rather use std::unordered_map/set everywhere -@Namespace("caffe2") public static native @Cast("const bool") @Name("operator <") boolean lessThan(@ByVal TypeIdentifier lhs, @ByVal TypeIdentifier rhs); +// #pragma once -@Namespace("caffe2") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( - @Cast("std::ostream*") @ByRef Pointer stream, - @ByVal TypeIdentifier typeId); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include - // namespace caffe2 +// #if __cpp_lib_string_view +// #include +// #define C10_HAS_STD_STRING_VIEW() 1 +// #define C10_HAS_STD_EXPERIMENTAL_STRING_VIEW() 0 +// #elif defined(__has_include) +// #if __has_include() +// libc++ 7.0 has experimental/string_view but it's just a #error +// #if !defined(_LIBCPP_VERSION) || (_LIBCPP_VERSION < 7000) +// #include +// #endif +// #if __cpp_lib_experimental_string_view +// #define C10_HAS_STD_STRING_VIEW() 0 +// #define C10_HAS_STD_EXPERIMENTAL_STRING_VIEW() 1 +// #endif +// #endif +// #endif -// Targeting ../DeviceTypeHash.java +// #ifndef C10_HAS_STD_STRING_VIEW +// #define C10_HAS_STD_STRING_VIEW() 0 +// #endif +// #ifndef C10_HAS_STD_EXPERIMENTAL_STRING_VIEW +// #define C10_HAS_STD_EXPERIMENTAL_STRING_VIEW() 0 +// #endif +// #if C10_CLANG_HAS_WARNING("-Wdeprecated") +// #endif - -// Targeting ../TypeMetaData.java +/** + * Reimplementation of std::string_view for C++11. + * Implemented following the interface definition in + * https://en.cppreference.com/w/cpp/string/basic_string_view + * See there for the API documentation. + * + * Difference: We don't have a Traits template parameter because + * std::char_traits isn't constexpr and we'd have to reimplement + * std::char_traits if we wanted to use it with our constexpr basic_string_view. + */ -// Mechanism for throwing errors which can't be prevented at compile time -// due to type erasure. E.g. somebody calling TypeMeta::copy() for -// non-copyable type. Right now just throws exception but is implemented -// in .cpp to manage dependencies -@Namespace("caffe2::detail") public static native void _ThrowRuntimeTypeLogicError(@StdString BytePointer msg); -@Namespace("caffe2::detail") public static native void _ThrowRuntimeTypeLogicError(@StdString String msg); + // namespace c10 + // namespace std -/** - * Placement new function for the type. - */ -/** - * Typed copy function for classes. - */ -/** - * A placeholder function for types that do not allow assignment. - */ -// Targeting ../_Uninitialized.java +// Parsed from c10/util/StringUtil.h +// #ifndef C10_UTIL_STRINGUTIL_H_ +// #define C10_UTIL_STRINGUTIL_H_ +// #include +// #include +// #include - // namespace detail +// #include +// #include +// #include +// #include +// #include -// -// note: this is outside TypeMeta bc gcc seems to have trouble -// with scalarTypeItemSizes as a constexpr static member used by -// a public inline instance method -// +// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +// #endif -// item sizes for TypeMeta::itemsize() fast path -@Namespace("caffe2") @MemberGetter public static native @Cast("const uint8_t") byte scalarTypeItemSizes(int i); -@Namespace("caffe2") @MemberGetter public static native @Cast("const uint8_t*") BytePointer scalarTypeItemSizes(); -// Targeting ../TypeMeta.java +// Obtains the base name from a full path. 
+@Namespace("c10::detail") public static native @StdString BytePointer StripBasename(@StdString BytePointer full_path); +@Namespace("c10::detail") public static native @StdString String StripBasename(@StdString String full_path); +@Namespace("c10::detail") public static native @StdString BytePointer ExcludeFileExtension(@StdString BytePointer full_path); +@Namespace("c10::detail") public static native @StdString String ExcludeFileExtension(@StdString String full_path); +// Targeting ../CompileTimeEmptyString.java -// specializations of TypeMeta::_typeMetaData for ScalarType types -// #define DEFINE_SCALAR_METADATA_INSTANCE(T, name) -// template <> -// constexpr uint16_t TypeMeta::_typeMetaData() noexcept { -// return static_cast(ScalarType::name); -// } +@Namespace("c10::detail") public static native @Cast("std::ostream*") @ByRef Pointer _str(@Cast("std::ostream*") @ByRef Pointer ss); +@Namespace("c10::detail") public static native @Cast("std::ostream*") @ByRef @Name("_str") Pointer _strCompileTimeEmptyString(@Cast("std::ostream*") @ByRef Pointer ss, @Const @ByRef CompileTimeEmptyString t); +// Specializations for already-a-string types. +// For c10::str() with an empty argument list (which is common in our assert +// macros), we don't want to pay the binary size for constructing and +// destructing a stringstream or even constructing a string. -@Namespace("caffe2") public static native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByVal TypeMeta lhs, @Const @ByVal TypeMeta rhs); -@Namespace("caffe2") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@Const @ByVal TypeMeta lhs, @Const @ByVal TypeMeta rhs); + // namespace detail -@Namespace("caffe2") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( - @Cast("std::ostream*") @ByRef Pointer stream, - @ByVal TypeMeta typeMeta); +// Convert a list of string-like arguments into a single string. -/** - * Register unique id for a type so it can be used in TypeMeta context, e.g. be - * used as a type for Blob or for Tensor elements. - * - * CAFFE_KNOWN_TYPE is deprecated; prefer CAFFE_DECLARE_KNOWN_TYPE and - * CAFFE_DEFINE_KNOWN_TYPE. - * - * CAFFE_KNOWN_TYPE does explicit instantiation of TypeIdentifier::Get - * template function and thus needs to be put in a single translation unit (.cpp - * file) for a given type T. Other translation units that use type T as a type - * of the caffe2::Blob or element type of caffe2::Tensor need to depend on the - * translation unit that contains CAFFE_KNOWN_TYPE declaration via regular - * linkage dependencies. - * - * NOTE: the macro needs to be invoked in ::caffe2 namespace - */ -// Implementation note: in MSVC, we will need to prepend the C10_API -// keyword in order to get things compiled properly. in Linux, gcc seems to -// create attribute ignored error for explicit template instantiations, see -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0537r0.html -// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51930 -// and as a result, we define these two macros slightly differently. -// #if defined(_MSC_VER) || defined(__clang__) -// #define EXPORT_IF_NOT_GCC C10_EXPORT -// #else -// #define EXPORT_IF_NOT_GCC -// #endif +// Replace all occurrences of "from" substring to "to" string. 
+// Returns number of replacements +@Namespace("c10") public static native @Cast("size_t") long ReplaceAll(@StdString @ByRef BytePointer s, @ByVal @Cast("c10::string_view*") Pointer from, @ByVal @Cast("c10::string_view*") Pointer to); +// Targeting ../SourceLocation.java -// CAFFE_KNOWN_TYPE is deprecated! Use CAFFE_DECLARE_KNOWN_TYPE and -// CAFFE_DEFINE_KNOWN_TYPE instead. -// #define CAFFE_KNOWN_TYPE(T) -// template uint16_t TypeMeta::addTypeMetaData(); -// template <> -// EXPORT_IF_NOT_GCC uint16_t TypeMeta::_typeMetaData() noexcept { -// static const uint16_t index = addTypeMetaData(); -// return index; -// } -// #define CAFFE_DEFINE_KNOWN_TYPE(T) -// template uint16_t TypeMeta::addTypeMetaData(); -// Unlike CAFFE_KNOWN_TYPE, CAFFE_DECLARE_KNOWN_TYPE avoids a function -// call to access _typeMetaData in the common case. -// #ifdef __CUDACC__ -// nvcc needs its own specialization that doesn't use -// C10_ALWAYS_INLINE so that it doesn't need to see a definition for -// _addTypeMeta. See NOTE [ TypeIdentifier::Get nvcc/clang discrepancy -// ]. -// #define CAFFE_DECLARE_KNOWN_TYPE(T) -// extern template uint16_t TypeMeta::addTypeMetaData(); -// template <> -// EXPORT_IF_NOT_GCC inline uint16_t TypeMeta::_typeMetaData() noexcept { -// static const uint16_t index = addTypeMetaData(); -// return index; -// } -// #else -// #define CAFFE_DECLARE_KNOWN_TYPE(T) -// extern template uint16_t TypeMeta::addTypeMetaData(); -// template <> -// EXPORT_IF_NOT_GCC C10_ALWAYS_INLINE uint16_t -// TypeMeta::_typeMetaData() noexcept { -// static const uint16_t index = addTypeMetaData(); -// return index; -// } -// #endif -// #define CAFFE_KNOWN_TYPE_NOEXPORT(T) -// template <> -// uint16_t TypeMeta::_typeMetaData() noexcept { -// static const uint16_t index = addTypeMetaData(); -// return index; -// } +// unix isprint but insensitive to locale +@Namespace("c10") public static native @Cast("bool") boolean isPrint(@Cast("char") byte s); - +@Namespace("c10") public static native void printQuotedString(@Cast("std::ostream*") @ByRef Pointer stmt, @ByVal @Cast("const c10::string_view*") Pointer str); - + // namespace c10 - +// #endif // C10_UTIL_STRINGUTIL_H_ - - +// Parsed from c10/util/in_place.h - +// #pragma once - +// #include - + // namespace c10 - - +// Parsed from c10/util/Exception.h - - -// For some of the compilers, long is defined separately from int32_t and -// int64_t. As a result we will need to actually define them separately. -// It is recommended that one does NOT use long - use int32_t and int64_t -// explicitly. Explicit long type annotation may go away in the future. -// details: This hack works by defining a _guard_long_unique type, which is -// long iff the compiler has a separate long type and is a dummy type otherwise. -// we then allocate a type id to that _guard_long_unique. If the compiler has a -// separate long type, this allocates a type id for long. Otherwise, it -// allocates a type id for the dummy type, which doesn't matter. - // namespace detail - - - - - - - - - - - - // namespace caffe2 - - -// Parsed from c10/util/AlignOf.h - -//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the AlignedCharArray and AlignedCharArrayUnion classes. 
-// -//===----------------------------------------------------------------------===// - -// ATen: modified from llvm::AlignOf -// replaced LLVM_ALIGNAS with alignas - -// #pragma once - -// #include - -/** \struct AlignedCharArray - * \brief Helper for building an aligned character array type. - * - * This template is used to explicitly build up a collection of aligned - * character array types. We have to build these up using a macro and explicit - * specialization to cope with MSVC (at least till 2015) where only an - * integer literal can be used to specify an alignment constraint. Once built - * up here, we can then begin to indirect between these using normal C++ - * template parameters. */ - -// MSVC requires special handling here. -// #ifndef _MSC_VER - -// #else // _MSC_VER - -/** \brief Create a type with an aligned char buffer. */ - -// We provide special variations of this template for the most common -// alignments because __declspec(align(...)) doesn't actually work when it is -// a member of a by-value function argument in MSVC, even if the alignment -// request is something reasonably like 8-byte or 16-byte. Note that we can't -// even include the declspec with the union that forces the alignment because -// MSVC warns on the existence of the declspec despite the union member forcing -// proper alignment. - -// The rest of these are provided with a __declspec(align(...)) and we simply -// can't pass them by-value as function arguments on MSVC. - -// #define AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) -// template -// struct AlignedCharArray { -// __declspec(align(x)) char buffer[Size]; -// }; - -// #undef AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT - -// #endif // _MSC_VER - // end namespace detail - -/** \brief This union template exposes a suitably aligned and sized character - * array member which can hold elements of any of up to ten types. - * - * These types may be arrays, structs, or any other types. The goal is to - * expose a char array buffer member which can be used as suitable storage for - * a placement new of any of these types. Support for more than ten types can - * be added at the cost of more boilerplate. */ - // end namespace c10 - - -// Parsed from c10/util/Deprecated.h - -// #pragma once - -/** - * This file provides portable macros for marking declarations - * as deprecated. You should generally use C10_DEPRECATED, - * except when marking 'using' declarations as deprecated, - * in which case you should use C10_DEFINE_DEPRECATED_USING - * (due to portability concerns). - */ - -// Sample usage: -// -// C10_DEPRECATED void bad_func(); -// struct C10_DEPRECATED BadStruct { -// ... -// }; - -// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses -// the "__declspec(deprecated)" implementation and not the C++14 -// "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on -// MSVC, but ran into issues with some older MSVC versions. -// #if (defined(__cplusplus) && __cplusplus >= 201402L) -// #define C10_DEPRECATED [[deprecated]] -// #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]] -// #elif defined(__GNUC__) -// #define C10_DEPRECATED __attribute__((deprecated)) -// TODO Is there some way to implement this? 
-// #define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated)) - -// #elif defined(_MSC_VER) -// #else -// #warning "You need to implement C10_DEPRECATED for this compiler" -// #define C10_DEPRECATED -// #endif - -// Sample usage: -// -// C10_DEFINE_DEPRECATED_USING(BadType, int) -// -// which is the portable version of -// -// using BadType [[deprecated]] = int; - -// technically [[deprecated]] syntax is from c++14 standard, but it works in -// many compilers. -// #if defined(__has_cpp_attribute) -// #if __has_cpp_attribute(deprecated) && !defined(__CUDACC__) -// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) -// using TypeName [[deprecated]] = TypeThingy; -// #endif -// #endif - -// #if defined(_MSC_VER) -// #endif - -// #if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__) -// nvcc has a bug where it doesn't understand __attribute__((deprecated)) -// declarations even when the host compiler supports it. We'll only use this gcc -// attribute when not cuda, and when using a GCC compiler that doesn't support -// the c++14 syntax we checked for above (available in __GNUC__ >= 5) -// #if !defined(__CUDACC__) -// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) -// using TypeName __attribute__((deprecated)) = TypeThingy; -// #else -// using cuda + gcc < 5, neither deprecated syntax is available so turning off. -// #define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) -// using TypeName = TypeThingy; -// #endif -// #endif - -// #if !defined(C10_DEFINE_DEPRECATED_USING) -// #warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler" -// #define C10_DEFINE_DEPRECATED_USING -// #endif - - -// Parsed from c10/util/StringUtil.h - -// #ifndef C10_UTIL_STRINGUTIL_H_ -// #define C10_UTIL_STRINGUTIL_H_ - -// #include -// #include -// #include - -// #include -// #include -// #include -// #include -// #include - -// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") -// #endif - -// Obtains the base name from a full path. -@Namespace("c10::detail") public static native @StdString BytePointer StripBasename(@StdString BytePointer full_path); -@Namespace("c10::detail") public static native @StdString String StripBasename(@StdString String full_path); - -@Namespace("c10::detail") public static native @StdString BytePointer ExcludeFileExtension(@StdString BytePointer full_path); -@Namespace("c10::detail") public static native @StdString String ExcludeFileExtension(@StdString String full_path); -// Targeting ../CompileTimeEmptyString.java - - - -@Namespace("c10::detail") public static native @Cast("std::ostream*") @ByRef Pointer _str(@Cast("std::ostream*") @ByRef Pointer ss); - -@Namespace("c10::detail") public static native @Cast("std::ostream*") @ByRef @Name("_str") Pointer _strCompileTimeEmptyString(@Cast("std::ostream*") @ByRef Pointer ss, @Const @ByRef CompileTimeEmptyString t); -// Targeting ../_str_wrapper.java - - - -// For c10::str() with an empty argument list (which is common in our assert -// macros), we don't want to pay the binary size for constructing and -// destructing a stringstream or even constructing a string. - - // namespace detail - -// Convert a list of string-like arguments into a single string. - -// Replace all occurrences of "from" substring to "to" string. 
-// Returns number of replacements -@Namespace("c10") public static native @Cast("size_t") long ReplaceAll(@StdString @ByRef BytePointer s, @ByVal @Cast("c10::string_view*") Pointer from, @ByVal @Cast("c10::string_view*") Pointer to); -// Targeting ../SourceLocation.java - - - - - -// unix isprint but insensitive to locale -@Namespace("c10") public static native @Cast("bool") boolean isPrint(@Cast("char") byte s); - -@Namespace("c10") public static native void printQuotedString(@Cast("std::ostream*") @ByRef Pointer stmt, @ByVal @Cast("const c10::string_view*") Pointer str); - - // namespace c10 - -// #endif // C10_UTIL_STRINGUTIL_H_ - - -// Parsed from c10/util/SmallVector.h - -//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file defines the SmallVector class. -// -//===----------------------------------------------------------------------===// - -// ATen: modified from llvm::SmallVector. -// used std::is_trivially_{copy,move}_constructible -// replaced iterator_range constructor with inline Container&& constructor -// replaced LLVM_NODISCARD, LLVM_LIKELY, and LLVM_UNLIKELY with c10 equivalents -// removed LLVM_GSL_OWNER -// added SmallVector::at -// added operator<< for std::ostream -// added C10_API to export SmallVectorBase - -// #pragma once - -// #include -// #include - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") -// #endif - -/** This is all the stuff common to all SmallVectors. - * - * The template parameter specifies the type which should be used to hold the - * Size and Capacity of the SmallVector, so it can be adjusted. - * Using 32 bit size is desirable to shrink the size of the SmallVector. - * Using 64 bit size is desirable for cases like SmallVector, where a - * 32 bit size would limit the vector to ~4GB. SmallVectors are used for - * buffering bitcode output - which can exceed 4GB. */ - -/** Figure out the offset of the first element. */ - -/** This is the part of SmallVectorTemplateBase which does not depend on whether - * the type T is a POD. The extra dummy template argument is used by ArrayRef - * to avoid unnecessarily requiring T to be complete. */ -// Targeting ../SmallVectorBase.java - - -// Targeting ../SymSmallVectorBase.java - - - -// Define this out-of-line to dissuade the C++ compiler from inlining it. - - -// Define this out-of-line to dissuade the C++ compiler from inlining it. - - -// Define this out-of-line to dissuade the C++ compiler from inlining it. - - -/** SmallVectorTemplateBase - This is where we put - * method implementations that are designed to work with trivially copyable - * T's. This allows using memcpy in place of copy/move construction and - * skipping destruction. */ -// Targeting ../DimVectorImpl.java - - -// Targeting ../SymDimVectorImpl.java - - - - - - - - - -/** Storage for the SmallVector elements. This is specialized for the N=0 case - * to avoid allocating unnecessary storage. 
*/ - -/** We need the storage to be properly aligned even for small-size of 0 so that - * the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is - * well-defined. */ - -/** Forward declaration of SmallVector so that - * calculateSmallVectorDefaultInlinedElements can reference - * {@code sizeof(SmallVector)}. */ - -/** Helper class for calculating the default number of inline elements for - * {@code SmallVector}. - * - * This should be migrated to a constexpr function when our minimum - * compiler support is enough for multi-statement constexpr functions. */ -// Targeting ../DimVector.java - - -// Targeting ../SymDimVector.java - - - - - -/** Given a range of type R, iterate the entire range and return a - * SmallVector with elements of the vector. This is useful, for example, - * when you want to iterate a range and then sort the results. */ - - // end namespace c10 - -/** Implement std::swap in terms of SmallVector swap. */ - -/** Implement std::swap in terms of SmallVector swap. */ - - // end namespace std - - - -// Parsed from c10/util/DimVector.h - -// #pragma once - -// #include -// #include -// #include -// #include - -@Namespace("c10") @MemberGetter public static native @Cast("const size_t") long kDimVectorStaticSize(); - -/** A container for sizes or strides */ - - // namespace c10 - - -// Parsed from c10/util/Exception.h - -// #ifndef C10_UTIL_EXCEPTION_H_ -// #define C10_UTIL_EXCEPTION_H_ +// #ifndef C10_UTIL_EXCEPTION_H_ +// #define C10_UTIL_EXCEPTION_H_ // #include // #include @@ -2259,7 +2050,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { C10_DEPRECATED_MESSAGE("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg) instead.") */ -@Namespace("c10::detail") public static native void deprecated_AT_ERROR(); + /* // Deprecation disabled until we fix sites in our codebase @@ -2269,7 +2060,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { error checking, use " \ "TORCH_CHECK. See https://github.com/pytorch/pytorch/issues/20287 for more details.") */ -@Namespace("c10::detail") public static native void deprecated_AT_ASSERT(); + /* // Deprecation disabled until we fix sites in our codebase @@ -2279,7 +2070,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { error checking, use " \ "TORCH_CHECK. See https://github.com/pytorch/pytorch/issues/20287 for more details.") */ -@Namespace("c10::detail") public static native void deprecated_AT_ASSERTM(); + // namespace detail // namespace c10 @@ -2321,2961 +2112,2840 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif // C10_UTIL_EXCEPTION_H_ -// Parsed from c10/util/ArrayRef.h - -//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -// ATen: modified from llvm::ArrayRef. -// removed llvm-specific functionality -// removed some implicit const -> non-const conversions that rely on -// complicated std::enable_if meta-programming -// removed a bunch of slice variants for simplicity... 
+
+// Parsed from c10/core/Device.h
+
+// #pragma once
+
+// #include
+// #include
+// #include
+
+// #include
+// #include
+// #include
+// #include
+
+/** An index representing a specific device; e.g., the 1 in GPU 1.
+ *  A DeviceIndex is not independently meaningful without knowing
+ *  the DeviceType it is associated with; try to use Device rather than
+ *  DeviceIndex directly. */
+// Targeting ../Device.java
+
+
+@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef Device device);
+
+ // namespace c10
+ // namespace std
+
+
+// Parsed from c10/core/DispatchKey.h
+
+// #pragma once
+
+// #include
+// #include
+// #include
+// #include
+
+// Semantically, each value of BackendComponent identifies a "backend" for our
+// dispatch. Some functionalities that we may dispatch to are allowed to
+// register different handlers for each backend. The BackendComponent is then
+// used to figure out which backend implementation to dispatch to.
+
+// In implementation terms, the backend component identifies a specific "bit" in
+// a DispatchKeySet. The bits in the DispatchKeySet are split between the bottom
+// ~12 "BackendComponent" bits, while the remaining upper bits are assigned to
+// functionalities. When we encounter a functionality bit that is known to be
+// customizable per-backend, then we also look at the lower BackendComponent
+// bits and take the highest bit to determine which backend's implementation to
+// use.
+
+// WARNING! If you add a new backend component to the end of this list,
+// make sure you update PrivateUse3Bit. (But you shouldn't: private use
+// keys should have higher precedence than all built-in keys)
+
+// If you add a new (non-privateuse) backend here,
+// make sure to add an Autograd fallthrough kernel
+// in aten/src/ATen/core/VariableFallbackKernel.cpp
+
+// #define C10_FORALL_BACKEND_COMPONENTS(_, extra)
+// _(CPU, extra)
+// _(CUDA, extra)
+// _(HIP, extra)
+// _(XLA, extra)
+// _(MPS, extra)
+// _(IPU, extra)
+// _(XPU, extra)
+// _(HPU, extra)
+// _(VE, extra)
+// _(Lazy, extra)
+// _(Meta, extra)
+// _(MTIA, extra)
+// _(PrivateUse1, extra)
+// _(PrivateUse2, extra)
+// _(PrivateUse3, extra)
+
+// WARNING! If we add a new per-backend functionality key that has higher
+// priority than Autograd, then make sure you update EndOfRuntimeBackendKeys
+
+// #define C10_FORALL_FUNCTIONALITY_KEYS(_)
+// _(Dense, )
+// _(Quantized, Quantized)
+// _(Sparse, Sparse)
+// _(NestedTensor, NestedTensor)
+// _(AutogradFunctionality, Autograd)
+
+@Namespace("c10") public enum BackendComponent {
+ // A "backend" is colloquially used to refer to handlers for dispatch
+ // which actually implement the numerics of an operation in question.
+ //
+ // Due to the nature of the enum, these backends are specified in
+ // an ordered way, but for most backends this order is not semantically
+ // meaningful (e.g., it's valid to reorder these backends without changing
+ // semantics). The only situation when backend ordering is meaningful
+ // is when the backend participates in multiple dispatch with another
+ // backend; e.g., CPU and CUDA (cuda must have higher priority).
+
+ // These keys don't correspond to individual kernels.
+ // Instead, they represent the backends that are allowed to override specific
+ // pieces of functionality:
+ // - dense kernels (e.g. DispatchKey::CPU)
+ // - sparse kernels (e.g. DispatchKey::SparseCPU)
+ // - quantized kernels (e.g. DispatchKey::QuantizedCPU)
+ // - autograd kernels (e.g. DispatchKey::AutogradCPU)
+ // We reserve space in the runtime operator table for this full cross product
+ // of
+ // [backends in this enum] x [keys below that are explicitly marked as having
+ // per-backend functionality]
+ //
+ // A meta tensor is a tensor without any data associated with it. (They
+ // have also colloquially been referred to as tensors on the "null" device).
+ // A meta tensor can be used to dry run operators without actually doing any
+ // computation, e.g., add on two meta tensors would give you another meta
+ // tensor with the output shape and dtype, but wouldn't actually add anything.
+ InvalidBit((byte)(0)),
+ CPUBit((byte)(1)),
+ CUDABit((byte)(2)),
+ HIPBit((byte)(3)),
+ XLABit((byte)(4)),
+ MPSBit((byte)(5)),
+ IPUBit((byte)(6)),
+ XPUBit((byte)(7)),
+ HPUBit((byte)(8)),
+ VEBit((byte)(9)),
+ LazyBit((byte)(10)),
+ MetaBit((byte)(11)),
+ MTIABit((byte)(12)),
+ PrivateUse1Bit((byte)(13)),
+ PrivateUse2Bit((byte)(14)),
+ PrivateUse3Bit((byte)(15)),
+
+ // Define an alias to represent end of backend dispatch keys.
+ // If you add new backend keys after PrivateUse3, please also update it here.
+ EndOfBackendKeys((byte)(PrivateUse3Bit.value));
+
+ public final byte value;
+ private BackendComponent(byte v) { this.value = v; }
+ private BackendComponent(BackendComponent e) { this.value = e.value; }
+ public BackendComponent intern() { for (BackendComponent e : values()) if (e.value == value) return e; return this; }
+ @Override public String toString() { return intern().name(); }
+}
+
+// Semantically, a dispatch key identifies a possible "level" in our
+// dispatch, for which a handler may be registered. Each handler corresponds
+// to a type of functionality.
+//
+// In implementation terms, the dispatch key identifies a specific "bit" in a
+// DispatchKeySet. Higher bit indexes get handled by dispatching first (because
+// we "count leading zeros" when we extract the highest priority dispatch
+// key.)
+//
+// Note [DispatchKey Classification]
+// This enum actually contains several types of keys, which are explained
+// in more detail further down:
+// (1) non-customizable backends (e.g. FPGA)
+// (2) non-customizable functionalities (e.g. Functionalize)
+// (3) functionalities that are customizable per backend (e.g. Dense, Sparse,
+//     AutogradFunctionality)
+// (4) per-backend instances of customizable functionalities (e.g. CPU,
+//     SparseCPU, AutogradCPU)
+// (5) alias keys (e.g. CompositeImplicitAutograd)
+//
+// Of the categories above, it's important to note:
+// (a) which keys are assigned individual bits in a DispatchKeySet
+// (b) which keys are assigned individual slots in the runtime operator table
+//     ("Runtime keys")
+//
+// (1), (2) and (3) all get their own dedicated bits in the DispatchKeySet.
+// (1), (2) and (4) all get their own dedicated slots in the runtime operator
+// table.
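// [Editor's note: a toy model, not generated code and not part of this patch.
// The comments above describe a DispatchKeySet as a 64-bit word with backend
// bits at the bottom, functionality bits above them, and the highest set bit
// extracted first via "count leading zeros". The sketch below reproduces only
// that extraction rule in plain Java, reusing the BackendComponent ordinals
// declared above; the real DispatchKeySet is a C++ type inside c10 and is not
// bound by this snippet.]
import org.bytedeco.pytorch.global.torch.BackendComponent;

public class KeySetToy {
    /** Index of the highest set bit: 63 - (number of leading zeros). */
    static int highestPriorityBit(long keySet) {
        return 63 - Long.numberOfLeadingZeros(keySet);
    }

    public static void main(String[] args) {
        long cpu  = 1L << BackendComponent.CPUBit.value;  // bit 1
        long cuda = 1L << BackendComponent.CUDABit.value; // bit 2
        // CUDA occupies the higher bit index, so when both backends are in the
        // set, CUDA is extracted first, matching the "cuda must have higher
        // priority" comment above.
        System.out.println(highestPriorityBit(cpu | cuda)); // prints 2 (CUDABit)
    }
}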
-// Targeting ../FloatComplexrrayRef.java +// See Note [DispatchKeySet Internal Representation] for more details. +// +// NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py +@Namespace("c10") public enum DispatchKey { + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // This is not a "real" functionality, but it exists to give us a "nullopt" + // element we can return for cases when a DispatchKeySet contains no elements. + // You can think a more semantically accurate definition of DispatchKey is: + // + // using DispatchKey = optional + // + // and Undefined == nullopt. We didn't actually represent + // it this way because optional would take two + // words, when DispatchKey fits in eight bits. -// Targeting ../DoubleComplexrrayRef.java + Undefined((short)(0)), + // Define an alias for Undefined to represent CatchAll (long term + // this will get eliminated, but for now it's convenient) + CatchAll((short)(Undefined.value)), -// Targeting ../ScalarTypeArrayRef.java + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Functionality Keys ~~~~~~~~~~~~~~~~~~~~~~ // + // Every value in the enum (up to EndOfFunctionalityKeys) + // corresponds to an individual "functionality" that can be dispatched to. + // This is represented in the DispatchKeySet by assigning each of these enum + // values + // to each of the remaining (64 - len(BackendComponent)) bits. + // + // Most of these functionalities have a single handler assigned to them, + // making them "runtime keys". + // That map to a single slot in the runtime operator table. + // + // A few functionalities are allowed to be customizable per backend. + // See [Note: Per-Backend Functionality Dispatch Keys] for details. + // See [Note: Per-Backend Functionality Dispatch Keys] + Dense((short)(Undefined.value + 1)), -// Targeting ../IValueArrayRef.java + // Below are non-extensible backends. + // These are backends that currently don't have their own overrides for + // Autograd/Sparse/Quantized kernels, + // and we therefore don't waste space in the runtime operator table allocating + // space for them. + // If any of these backends ever need to customize, e.g., Autograd, then we'll + // need to add a DispatchKey::*Bit for them. + // TODO: put this in BackendComponents + FPGA((short)(Undefined.value + 2)), // Xilinx support lives out of tree at + // https://gitlab.com/pytorch-complex/vitis_kernels -// Targeting ../EnumNameValueArrayRef.java + // TODO: put this in BackendComponents + // ONNX Runtime, lives out of tree at https://github.com/pytorch/ort and + // https://github.com/microsoft/onnxruntime, and is also used to test general + // backend/extension machinery in the core. cf: + // - test/cpp_extensions/ort_extension.cpp + // - test/test_torch.py + // - aten/src/ATen/test/extension_backend_test.cpp + ORT((short)(Undefined.value + 3)), + Vulkan((short)(Undefined.value + 4)), // TODO: put this in BackendComponents + Metal((short)(Undefined.value + 5)), // TODO: put this in BackendComponents -// Targeting ../TypeArrayRef.java + // See [Note: Per-Backend Functionality Dispatch Keys] + Quantized((short)(Undefined.value + 6)), + // This backend is to support custom RNGs; it lets you go + // to a different kernel if you pass in a generator that is not a + // traditional CPUGeneratorImpl/CUDAGeneratorImpl. To make use of this + // key: + // 1) set it as a second parameter of at::Generator constructor call in + // the user-defined PRNG class. 
+ // 2) use it as a dispatch key while registering custom kernels + // (templatized kernels specialized for user-defined PRNG class) + // intended for out of tree use; tested by aten/src/ATen/test/rng_test.cpp + CustomRNGKeyId((short)(Undefined.value + 7)), -// Targeting ../SymbolArrayRef.java + // TODO: Make Mkldnn a functionality key, so we can give it Meta + // support + // Here are backends which specify more specialized operators + // based on the layout of the tensor. Note that the sparse backends + // are one case where ordering matters: sparse multi-dispatches with + // the corresponding dense tensors, and must be handled before them. + MkldnnCPU((short)(Undefined.value + 8)), // registered at build/aten/src/ATen/RegisterMkldnnCPU.cpp + // NB: not to be confused with MKLDNN, which is Caffe2 only + // See [Note: Per-Backend Functionality Dispatch Keys] + Sparse((short)(Undefined.value + 9)), -// Targeting ../StrideArrayRef.java + // TODO: Make SparseCsr a functionality key + SparseCsrCPU((short)(Undefined.value + 10)), + SparseCsrCUDA((short)(Undefined.value + 11)), + NestedTensor((short)(Undefined.value + 12)), -// Targeting ../DimnameArrayRef.java + // In some situations, it is not immediately obvious what the correct + // backend for a function is, because the function in question doesn't + // have any "tensor" arguments. In this case, a BackendSelect function + // can be registered to implement the custom determination of the + // correct backend. + BackendSelect((short)(Undefined.value + 13)), + Python((short)(Undefined.value + 14)), -// Targeting ../ScalarArrayRef.java + // Out-of-core key for Fake Tensor in torchdistx. + // See https://pytorch.org/torchdistx/latest/fake_tensor.html + // TODO: delete this in favor of Python-implemented fake tensor + Fake((short)(Undefined.value + 15)), + // See Note [Out-of-tree vmap+grad prototype]. The purpose of this key + // is to insert code after the "autograd subsystem" runs, so this key should + // be directly after ADInplaceOrView and all of the autograd keys. + FuncTorchDynamicLayerBackMode((short)(Undefined.value + 16)), + // Alias and mutation removal. + // If some backends want to opt into only alias removal or only mutation + // removal, + // we can consider adding separate keys dedicated to those individual passes. + // See Note [Functionalization Pass In Core] for details. + Functionalize((short)(Undefined.value + 17)), -// Targeting ../TensorArrayRef.java + // The named dispatch key is set for any tensors with named dimensions. + // Although we have a dispatch key for named tensors, for historical reasons, + // this dispatch key doesn't do any of the substantive functionality for named + // tensor (though, hypothetically, it could!) At the moment, it's just + // responsible for letting us give good error messages when operations + // don't support named tensors. + // + // NB: If you ever consider moving named tensor functionality into + // this dispatch key, note that it might be necessary to add another dispatch + // key that triggers before composite operators, in case a composite operator + // has named dimension propagation that doesn't match that of its + // constituent parts.
+ // TODO: delete this once torchdim lands in functorch + Named((short)(Undefined.value + 18)), + // The Conjugate dispatch key is set for any tensors that need to perform + // conjugation + // This is implemented at a dispatch level right before any backends run + Conjugate((short)(Undefined.value + 19)), -// Targeting ../TensorArgArrayRef.java + // The Negative dispatch key is set for any tensors that need to perform + // negation + // This is implemented at a dispatch level right before any backends run + Negative((short)(Undefined.value + 20)), + ZeroTensor((short)(Undefined.value + 21)), // registered at build/aten/src/ATen/RegisterZeroTensor.cpp -// Targeting ../TensorIndexArrayRef.java + // Note [ADInplaceOrView key] + // ADInplaceOrView key is used by inplace or view ops to register a kernel + // that does additional setup for future autograd computation. + // + // 1. For inplace ops this kernel does version bump + // 2. For view ops this kernel does `as_view` setup where we properly setup + // DifferentiableViewMeta on the view tensors. + // + // For other ops it's a fallthrough kernel, since there's no extra + // work to do. + // + // Note [Dream: skip VariableType kernel when requires_grad=false] + // + // In an ideal world where we can skip VariableType kernel for inputs + // with requires_grad=false, instead of a fallthrough kernel, we'll + // register a kernel shown below to all functional ops as well: + // torch::Tensor my_functional_op(...) { + // { + // // Note for every op in VariableType, you need to go through + // // `AutoDispatchBelowADInplaceOrView` guard exactly once to add the + // // key to TLS excluded set. If you don't go through it at all, + // // inplace/view ops called through `at::` inside your backend + // // kernel will dispatch to ADInplaceOrView kernels and do a lot + // // of extra work. + // at::AutoDispatchBelowADInplaceOrView guard; + // at::redispatch::my_functional_op(...); + // } + // } + // But this work is currently blocked since it adds an extra dispatch + // for all ops and it's non-trivial overhead at the model level (a few percent). + // Thus our current approach takes advantage of the fact that every kernel goes + // through the VariableType kernel first and pulls the + // `at::AutoDispatchBelowADInplaceOrView` guard of functional ops + // up to the `VariableType` kernel. Thus we only add the extra dispatch + // to view/inplace ops to minimize its perf impact on real models. + ADInplaceOrView((short)(Undefined.value + 22)), + // Note [Alias Dispatch Key : Autograd] + // All backends are oblivious to autograd; autograd is handled as a + // layer which happens on top of all backends. It inspects the autograd + // metadata of all inputs, determines what autograd metadata should be + // constructed by the output, and otherwise defers to the backend to + // actually do the numeric computation. Autograd contains + // the bulk of this logic. + // Autograd is now an alias dispatch key which by default maps to all + // backend-specific autograd keys. + // Backend-specific keys allow backends to override the default kernel registered + // to the Autograd key as needed. + // For example, XLA wants to define autograd for einsum directly. + // Registering a custom autograd implementation at the XLA key won't work + // because we process Autograd before XLA. This key has higher priority and + // gets processed first.
You generally should NOT redispatch after handling + // autograd here (since that would result in execution of the Autograd + // operator, which you're trying to skip). In AutogradXLA implementations, + // you are responsible for handling autograd yourself, or deferring to other + // operators which support autograd. -// Targeting ../TensorOptionalArrayRef.java + // Currently we only have backend-specific autograd keys for CPU/CUDA/XLA and + // reserved user-defined backends. All other in-tree backends share the + // AutogradOther key. We can add a specific autograd key for those backends + // upon request. + AutogradOther((short)(Undefined.value + 23)), + // See [Note: Per-Backend Functionality Dispatch Keys] + AutogradFunctionality((short)(Undefined.value + 24)), -// Targeting ../SavedVariableArrayRef.java + // NestedTensor is an example of something that isn't a "real backend" + // (because it mostly consists of redispatching kernels) + // but it would like to override autograd functionality in C++. + // We can handle cases like this by adding an extra functionality key + // exclusively for handling autograd for NestedTensor. + // lives out of tree at + // https://github.com/pytorch/nestedtensor + AutogradNestedTensor((short)(Undefined.value + 25)), + Tracer((short)(Undefined.value + 26)), -// Targeting ../SugaredValueArrayRef.java + // TODO: make Autocast a functionality key + // Autocasting precedes VariableTypeId, to ensure casts are autograd-exposed + // and inputs are saved for backward in the post-autocast type. + AutocastCPU((short)(Undefined.value + 27)), + AutocastXPU((short)(Undefined.value + 28)), + AutocastHPU((short)(Undefined.value + 29)), + // Naughtily, AutocastCUDA is also being used for XLA. In the terminal state, + // it probably should get its own Autocast key + AutocastCUDA((short)(Undefined.value + 30)), + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // There are a number of alternative modes which may want to run before + // autograd; for example, error checking, tracing, profiling or vmap. They + // go here. -// Targeting ../NamedValueArrayRef.java + FuncTorchBatched((short)(Undefined.value + 31)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchVmapMode((short)(Undefined.value + 32)), // See Note [Out-of-tree vmap+grad prototype] + // This is the dispatch key for BatchedTensorImpl, which is used to implement + // batching rules for vmap. + Batched((short)(Undefined.value + 33)), -// Targeting ../BlockArrayRef.java + // When we are inside a vmap, all tensors dispatch on this key. + // See Note: [DispatchKey::VmapMode usage] for more details. + VmapMode((short)(Undefined.value + 34)), + FuncTorchGradWrapper((short)(Undefined.value + 35)), // See Note [Out-of-tree vmap+grad prototype] -// Targeting ../ValueArrayRef.java + // Out-of-core key for Deferred Module Initialization in torchdistx. + // See https://pytorch.org/torchdistx/latest/deferred_init.html + DeferredInit((short)(Undefined.value + 36)), + // Used by Python key logic to know the set of TLS on entry to the dispatcher + // This kernel assumes it is the top-most non-functorch-related DispatchKey. + // If you add a key above, make sure to update the fallback implementation for + // this.
+ PythonTLSSnapshot((short)(Undefined.value + 37)), + // This key should be at the very top of the dispatcher + FuncTorchDynamicLayerFrontMode((short)(Undefined.value + 38)), // See Note [Out-of-tree vmap+grad prototype] + // TESTING: This is intended to be a generic testing tensor type id. + // Don't use it for anything real; its only acceptable use is within a single + // process test. Use it by creating a TensorImpl with this DispatchKey, and + // then registering operators to operate on this type id. See + // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example. + TESTING_ONLY_GenericWrapper((short)(Undefined.value + 39)), + // TESTING: This is intended to be a generic testing tensor type id. + // Don't use it for anything real; its only acceptable use is within a single + // process test. Use it by toggling the mode on and off via + // TESTING_ONLY_tls_generic_mode_set_enabled and then registering operators + // to operate on this type id. See + // aten/src/ATen/core/dispatch/backend_fallback_test.cpp + // for a usage example + TESTING_ONLY_GenericMode((short)(Undefined.value + 40)), -/** \name ArrayRef Convenience constructors - * \{ -

- * Construct an ArrayRef from a single element. */ + // This is a bypass that allows you to skip running the C++ dispatcher + // entirely + PythonDispatcher((short)(Undefined.value + 41)), -/** Construct an ArrayRef from a pointer and length. */ + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + EndOfFunctionalityKeys((short)(Undefined.value + 42)), -/** Construct an ArrayRef from a range. */ + StartOfDenseBackends((short)(Undefined.value + 43)), + CPU((short)(Undefined.value + 44)), + + CUDA((short)(Undefined.value + 45)), + + HIP((short)(Undefined.value + 46)), + + XLA((short)(Undefined.value + 47)), + + MPS((short)(Undefined.value + 48)), + + IPU((short)(Undefined.value + 49)), + + XPU((short)(Undefined.value + 50)), + + HPU((short)(Undefined.value + 51)), + + VE((short)(Undefined.value + 52)), + + Lazy((short)(Undefined.value + 53)), + + Meta((short)(Undefined.value + 54)), + + MTIA((short)(Undefined.value + 55)), + + PrivateUse1((short)(Undefined.value + 56)), + + PrivateUse2((short)(Undefined.value + 57)), + + PrivateUse3((short)(Undefined.value + 58)), + EndOfDenseBackends((short)(0)), + StartOfQuantizedBackends((short)(1)), + QuantizedCPU((short)(2)), + + QuantizedCUDA((short)(3)), + + QuantizedHIP((short)(4)), + + QuantizedXLA((short)(5)), + + QuantizedMPS((short)(6)), + + QuantizedIPU((short)(7)), + + QuantizedXPU((short)(8)), + + QuantizedHPU((short)(9)), + + QuantizedVE((short)(10)), + + QuantizedLazy((short)(11)), + + QuantizedMeta((short)(12)), + + QuantizedMTIA((short)(13)), + + QuantizedPrivateUse1((short)(14)), + + QuantizedPrivateUse2((short)(15)), + + QuantizedPrivateUse3((short)(16)), + EndOfQuantizedBackends((short)( QuantizedPrivateUse3.value)), + StartOfSparseBackends((short)( QuantizedPrivateUse3.value + 1)), + SparseCPU((short)( QuantizedPrivateUse3.value + 2)), + + SparseCUDA((short)( QuantizedPrivateUse3.value + 3)), + + SparseHIP((short)( QuantizedPrivateUse3.value + 4)), + + SparseXLA((short)( QuantizedPrivateUse3.value + 5)), + + SparseMPS((short)( QuantizedPrivateUse3.value + 6)), + + SparseIPU((short)( QuantizedPrivateUse3.value + 7)), + + SparseXPU((short)( QuantizedPrivateUse3.value + 8)), + + SparseHPU((short)( QuantizedPrivateUse3.value + 9)), + + SparseVE((short)( QuantizedPrivateUse3.value + 10)), + + SparseLazy((short)( QuantizedPrivateUse3.value + 11)), + + SparseMeta((short)( QuantizedPrivateUse3.value + 12)), + + SparseMTIA((short)( QuantizedPrivateUse3.value + 13)), + + SparsePrivateUse1((short)( QuantizedPrivateUse3.value + 14)), + + SparsePrivateUse2((short)( QuantizedPrivateUse3.value + 15)), + + SparsePrivateUse3((short)( QuantizedPrivateUse3.value + 16)), + EndOfSparseBackends((short)( SparsePrivateUse3.value)), + StartOfNestedTensorBackends((short)( SparsePrivateUse3.value + 1)), + NestedTensorCPU((short)( SparsePrivateUse3.value + 2)), + + NestedTensorCUDA((short)( SparsePrivateUse3.value + 3)), + + NestedTensorHIP((short)( SparsePrivateUse3.value + 4)), + + NestedTensorXLA((short)( SparsePrivateUse3.value + 5)), + + NestedTensorMPS((short)( SparsePrivateUse3.value + 6)), + + NestedTensorIPU((short)( SparsePrivateUse3.value + 7)), + + NestedTensorXPU((short)( SparsePrivateUse3.value + 8)), + + NestedTensorHPU((short)( SparsePrivateUse3.value + 9)), + + NestedTensorVE((short)( SparsePrivateUse3.value + 10)), + + NestedTensorLazy((short)( SparsePrivateUse3.value + 11)), + + NestedTensorMeta((short)( SparsePrivateUse3.value + 12)), + + NestedTensorMTIA((short)( SparsePrivateUse3.value + 13)), + + 
NestedTensorPrivateUse1((short)( SparsePrivateUse3.value + 14)), + + NestedTensorPrivateUse2((short)( SparsePrivateUse3.value + 15)), + + NestedTensorPrivateUse3((short)( SparsePrivateUse3.value + 16)), + EndOfNestedTensorBackends((short)( NestedTensorPrivateUse3.value)), + StartOfAutogradFunctionalityBackends((short)( NestedTensorPrivateUse3.value + 1)), + AutogradCPU((short)( NestedTensorPrivateUse3.value + 2)), + + AutogradCUDA((short)( NestedTensorPrivateUse3.value + 3)), + + AutogradHIP((short)( NestedTensorPrivateUse3.value + 4)), + + AutogradXLA((short)( NestedTensorPrivateUse3.value + 5)), + + AutogradMPS((short)( NestedTensorPrivateUse3.value + 6)), + + AutogradIPU((short)( NestedTensorPrivateUse3.value + 7)), + + AutogradXPU((short)( NestedTensorPrivateUse3.value + 8)), + + AutogradHPU((short)( NestedTensorPrivateUse3.value + 9)), + + AutogradVE((short)( NestedTensorPrivateUse3.value + 10)), + + AutogradLazy((short)( NestedTensorPrivateUse3.value + 11)), + + AutogradMeta((short)( NestedTensorPrivateUse3.value + 12)), + + AutogradMTIA((short)( NestedTensorPrivateUse3.value + 13)), + + AutogradPrivateUse1((short)( NestedTensorPrivateUse3.value + 14)), + + AutogradPrivateUse2((short)( NestedTensorPrivateUse3.value + 15)), + + AutogradPrivateUse3((short)( NestedTensorPrivateUse3.value + 16)), + EndOfAutogradFunctionalityBackends((short)( AutogradPrivateUse3.value)), -/** Construct an ArrayRef from a SmallVector. */ + EndOfRuntimeBackendKeys((short)(EndOfAutogradFunctionalityBackends.value)), -/** Construct an ArrayRef from a SmallVector. */ + // ~~~~~~~~~~~~~~~~~~~~~~ Alias Dispatch Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // Note [Alias Dispatch Keys] + // Alias dispatch keys are synthetic dispatch keys which map to multiple + // runtime dispatch keys. Alias keys have precedence, but they are always + // lower precedence than runtime keys. You can register a kernel to an + // alias key; the kernel may then be populated to the mapped runtime keys + // during dispatch table computation. + // If a runtime dispatch key has multiple kernels from alias keys, the + // winning kernel is chosen based on the precedence of the alias keys (but + // runtime keys always have precedence over alias keys). + // Alias keys won't be directly called during runtime. -/** Construct an ArrayRef from a std::vector. */ + // See Note [Alias Dispatch Key : Autograd] + Autograd((short)(EndOfAutogradFunctionalityBackends.value + 1)), + CompositeImplicitAutograd((short)(EndOfAutogradFunctionalityBackends.value + 2)), // registered at + // build/aten/src/ATen/RegisterCompositeImplicitAutograd.cpp -/** Construct an ArrayRef from a std::array. */ - -/** Construct an ArrayRef from an ArrayRef (no-op) (const) */ - -/** Construct an ArrayRef from an ArrayRef (no-op) */ - -/** Construct an ArrayRef from a C array. */ - -// WARNING: Template instantiation will NOT be willing to do an implicit -// conversions to get you to an c10::ArrayRef, which is why we need so -// many overloads.
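// [Editor's illustration, hypothetical] The per-backend runtime keys above are
// laid out as one contiguous block per functionality, in the same order as
// BackendComponent, so the offset of a runtime key from its Start* marker
// equals the BackendComponent bit of its backend. Several constants also share
// a raw value (e.g. EndOfAutogradFunctionalityBackends == AutogradPrivateUse3),
// which is why the enum provides intern() to map a raw value back to a
// canonical constant. A sketch, assuming the nested-enum layout noted earlier:
import org.bytedeco.pytorch.global.torch.BackendComponent;
import org.bytedeco.pytorch.global.torch.DispatchKey;

public class RuntimeKeyLayoutSketch {
    public static void main(String[] args) {
        int offset = DispatchKey.AutogradCUDA.value
                - DispatchKey.StartOfAutogradFunctionalityBackends.value;
        // From the values above, offset == 2 == BackendComponent.CUDABit.value
        System.out.println("AutogradCUDA backend offset: " + offset
                + ", CUDABit: " + BackendComponent.CUDABit.value);
    }
}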
+ // Note: The alias keyset for FuncTorchBatchedDecomposition is disjoint from + // all other alias keysets, and so precedence order doesn't matter + FuncTorchBatchedDecomposition((short)(EndOfAutogradFunctionalityBackends.value + 3)), // registered at + // build/aten/src/ATen/RegisterFuncTorchBatchedDecomposition.cpp + // Note: The alias keyset for CompositeImplicitAutogradNestedTensor is + // disjoint from all other alias keysets + CompositeImplicitAutogradNestedTensor((short)(EndOfAutogradFunctionalityBackends.value + 4)), // registered at + // build/aten/src/ATen/RegisterCompositeImplicitAutogradNestedTensor.cpp + CompositeExplicitAutograd((short)(EndOfAutogradFunctionalityBackends.value + 5)), // registered at + // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp + // See Note [CompositeExplicitAutogradNonFunctional Key] + CompositeExplicitAutogradNonFunctional((short)(EndOfAutogradFunctionalityBackends.value + 6)), // registered at + // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp + // Define an alias key to represent end of alias dispatch keys. + // If you add new alias keys after Autograd, please also update it here. + StartOfAliasKeys((short)(Autograd.value)), + EndOfAliasKeys((short)(CompositeExplicitAutogradNonFunctional.value)), // + // ~~~~~~~~~~~~~~~~~~~~~~~~~ BC ALIASES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // The aliases exist for backwards compatibility reasons; they shouldn't + // be used + CPUTensorId((short)(CPU.value)), + CUDATensorId((short)(CUDA.value)), + DefaultBackend((short)(CompositeExplicitAutograd.value)), + PrivateUse1_PreAutograd((short)(AutogradPrivateUse1.value)), + PrivateUse2_PreAutograd((short)(AutogradPrivateUse2.value)), + PrivateUse3_PreAutograd((short)(AutogradPrivateUse3.value)), + Autocast((short)(AutocastCUDA.value)); + public final short value; + private DispatchKey(short v) { this.value = v; } + private DispatchKey(DispatchKey e) { this.value = e.value; } + public DispatchKey intern() { for (DispatchKey e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Note [Private use DispatchKey] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Private use tensor IDs are preallocated tensor type IDs for use in user +// applications. Similar to private use fields in HTTP, they can be used +// by end users for experimental or private applications, without needing +// to "standardize" the tensor ID (which would be done by submitting a PR +// to PyTorch to add your type ID). +// +// Private use tensor IDs are appropriate to use if you want to experiment +// with adding a new tensor type (without having to patch PyTorch first) or +// have a private, non-distributed application that needs to make use of a +// new tensor type. Private use tensor IDs are NOT appropriate to use for +// libraries intended to be distributed to further users: please contact +// the PyTorch developers to get a type ID registered in this case. +// +// We provide two classes of private use tensor id: regular DispatchKeys +// and Autograd DispatchKeys. DispatchKeys serve the role of ordinary "backend" +// DispatchKeys; if you were adding support for a new type of accelerator, you +// would use a backend DispatchKey, and ideally automatically reuse +// AutogradOther definitions already defined in PyTorch.
AutogradPrivateUse +// DispatchKeys serve as "wrapper" DispatchKeys: they are only necessary for +// tensors that compose multiple internal tensors, and for cases when the +// built-in autograd formulas for operators are not appropriate. +// Check if a DispatchKey is an alias mapping to other runtime keys. +@Namespace("c10") public static native @Cast("const bool") boolean isAliasDispatchKey(DispatchKey k); +@Namespace("c10") public static native @Cast("const bool") boolean isAliasDispatchKey(@Cast("c10::DispatchKey") short k); +// [Note: Per-Backend Functionality Dispatch Keys] +// Check if a DispatchKey is a per-backend functionality key +// Any functionalities that can be customized per-backend should be added here. +// These keys correspond to functionalities that can be customized individually +// per backend. While they only take up one bit in the `DispatchKeySet` bitset, +// they map to (# backends) slots in the operator table. +// Each of these keys also has a separate set of "runtime keys" in the dispatch +// key enum, per backend, which *do* map to the individual operator table slots. +// For example, the "Sparse" key maps to an individual bit in the +// DispatchKeySet, while `SparseCPU`, `SparseCUDA`, etc all map to individual +// slots in the runtime operator table. +@Namespace("c10") public static native @Cast("const bool") boolean isPerBackendFunctionalityKey(DispatchKey k); +@Namespace("c10") public static native @Cast("const bool") boolean isPerBackendFunctionalityKey(@Cast("c10::DispatchKey") short k); +// Note that this includes Undefined in the total count. +// BUT EndOfFunctionalityKeys is its own (placeholder) key. +// e.g. Undefined=0, Dense=1, Sparse=2, EndOfFunctionalityKeys=3. +// In the above example, there are 3 total functionality keys. +@Namespace("c10") @MemberGetter public static native @Cast("const uint8_t") byte num_functionality_keys(); +@Namespace("c10") @MemberGetter public static native @Cast("const uint8_t") byte num_backends(); +// Note [No More Than 16 Backends] +// Search for this note to find places in the code where the "no more than 16 +// backends" invariant is baked in. +@Namespace("c10") public static native @Cast("const uint8_t") byte numPerBackendFunctionalityKeys(); +// #if defined(C10_MOBILE_TRIM_DISPATCH_KEYS) +// See [Note: Trimmed Mobile Dispatch Keys] +@Namespace("c10") @MemberGetter public static native @Cast("const uint16_t") short num_runtime_entries(); +// #else +// #endif -// This alias is deprecated because it doesn't make ownership -// semantics obvious. Use IntArrayRef instead!
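// [Editor's illustration, hypothetical] Calling the two predicates declared
// above, assuming they are exposed as static methods of
// org.bytedeco.pytorch.global.torch like the other parsed functions. The
// expected results follow the comments above: Autograd is an alias key,
// Sparse is a per-backend functionality key, FPGA is a plain runtime backend.
import org.bytedeco.pytorch.global.torch;
import org.bytedeco.pytorch.global.torch.DispatchKey;

public class DispatchKeyPredicatesSketch {
    public static void main(String[] args) {
        System.out.println(torch.isAliasDispatchKey(DispatchKey.Autograd));          // true
        System.out.println(torch.isPerBackendFunctionalityKey(DispatchKey.Sparse));  // true
        System.out.println(torch.isPerBackendFunctionalityKey(DispatchKey.FPGA));    // false
    }
}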
- // namespace c10 +// See Note [No More Than 16 Backends] +@Namespace("c10") @MemberGetter public static native @Cast("const uint16_t") short full_backend_mask(); +@Namespace("c10") public static native @Cast("const char*") BytePointer toString(DispatchKey arg0); +@Namespace("c10") public static native String toString(@Cast("c10::DispatchKey") short arg0); +@Namespace("c10") public static native @Cast("const char*") BytePointer toString(BackendComponent arg0); +@Namespace("c10") public static native String toString(@Cast("c10::BackendComponent") byte arg0); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer arg0, DispatchKey arg1); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer arg0, @Cast("c10::DispatchKey") short arg1); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer arg0, BackendComponent arg1); -// Parsed from c10/util/complex.h +@Namespace("c10") public static native DispatchKey getAutogradKeyFromBackend(BackendComponent k); +@Namespace("c10") public static native @Cast("c10::DispatchKey") short getAutogradKeyFromBackend(@Cast("c10::BackendComponent") byte k); -// #pragma once +// Parses a string into a dispatch key. +// If the string cannot be correctly parsed, throws an exception. +@Namespace("c10") public static native DispatchKey parseDispatchKey(@StdString BytePointer k); +@Namespace("c10") public static native @Cast("c10::DispatchKey") short parseDispatchKey(@StdString String k); -// #include +// These are some convenience identifiers for dispatch keys which are +// shorter to type than their long counterparts. Note that some of these +// dispatch keys directly correspond to DeviceType; and most APIs that +// accept DispatchKey also accept DeviceType; e.g., +// torch::dispatch(torch::kCPU, ...) is also valid. +@Namespace("c10") @MemberGetter public static native DispatchKey kAutograd(); -// #include +// See Note [The Ordering of Per-Backend Dispatch Keys Matters!] +// This function relies on the invariant that the dispatch keys between +// StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend +// in the same order as `BackendComponent`. -// #if defined(__CUDACC__) || defined(__HIPCC__) -// #endif -// #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") -// #endif -// #if C10_CLANG_HAS_WARNING("-Wfloat-conversion") -// #endif +@Namespace("c10") public static native DispatchKey toFunctionalityKey(DispatchKey k); +@Namespace("c10") public static native @Cast("c10::DispatchKey") short toFunctionalityKey(@Cast("c10::DispatchKey") short k); -// c10::complex is an implementation of complex numbers that aims -// to work on all devices supported by PyTorch -// -// Most of the APIs duplicates std::complex -// Reference: https://en.cppreference.com/w/cpp/numeric/complex -// -// [NOTE: Complex Operator Unification] -// Operators currently use a mix of std::complex, thrust::complex, and -// c10::complex internally. The end state is that all operators will use -// c10::complex internally. Until then, there may be some hacks to support all -// variants. 
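// [Editor's illustration, hypothetical] parseDispatchKey and toString, as
// declared above, should round-trip a key name; parseDispatchKey throws if the
// string cannot be parsed, and the const char* result is mapped to a JavaCPP
// BytePointer. A sketch under those assumptions:
import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.pytorch.global.torch;
import org.bytedeco.pytorch.global.torch.DispatchKey;

public class ParseDispatchKeySketch {
    public static void main(String[] args) {
        DispatchKey k = torch.parseDispatchKey(new BytePointer("CPU"));
        BytePointer name = torch.toString(k); // const char* mapped to BytePointer
        System.out.println(name.getString()); // expected: "CPU"
    }
}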
-// -// -// [Note on Constructors] -// -// The APIs of constructors are mostly copied from C++ standard: -// https://en.cppreference.com/w/cpp/numeric/complex/complex -// -// Since C++14, all constructors are constexpr in std::complex -// -// There are three types of constructors: -// - initializing from real and imag: -// `constexpr complex( const T& re = T(), const T& im = T() );` -// - implicitly-declared copy constructor -// - converting constructors -// -// Converting constructors: -// - std::complex defines converting constructor between float/double/long -// double, -// while we define converting constructor between float/double. -// - For these converting constructors, upcasting is implicit, downcasting is -// explicit. -// - We also define explicit casting from std::complex/thrust::complex -// - Note that the conversion from thrust is not constexpr, because -// thrust does not define them as constexpr ???? -// -// -// [Operator =] -// -// The APIs of operator = are mostly copied from C++ standard: -// https://en.cppreference.com/w/cpp/numeric/complex/operator%3D -// -// Since C++20, all operator= are constexpr. Although we are not building with -// C++20, we also obey this behavior. -// -// There are three types of assign operator: -// - Assign a real value from the same scalar type -// - In std, this is templated as complex& operator=(const T& x) -// with specialization `complex& operator=(T x)` for float/double/long -// double Since we only support float and double, on will use `complex& -// operator=(T x)` -// - Copy assignment operator and converting assignment operator -// - There is no specialization of converting assignment operators, which type -// is -// convertible is solely dependent on whether the scalar type is convertible -// -// In addition to the standard assignment, we also provide assignment operators -// with std and thrust -// -// -// [Casting operators] -// -// std::complex does not have casting operators. We define casting operators -// casting to std::complex and thrust::complex -// -// -// [Operator ""] -// -// std::complex has custom literals `i`, `if` and `il` defined in namespace -// `std::literals::complex_literals`. We define our own custom literals in the -// namespace `c10::complex_literals`. Our custom literals does not follow the -// same behavior as in std::complex, instead, we define _if, _id to construct -// float/double complex literals. -// -// -// [real() and imag()] -// -// In C++20, there are two overload of these functions, one it to return the -// real/imag, another is to set real/imag, they are both constexpr. We follow -// this design. -// -// -// [Operator +=,-=,*=,/=] -// -// Since C++20, these operators become constexpr. In our implementation, they -// are also constexpr. -// -// There are two types of such operators: operating with a real number, or -// operating with another complex number. For the operating with a real number, -// the generic template form has argument type `const T &`, while the overload -// for float/double/long double has `T`. We will follow the same type as -// float/double/long double in std. -// -// [Unary operator +-] -// -// Since C++20, they are constexpr. 
We also make them expr -// -// [Binary operators +-*/] -// -// Each operator has three versions (taking + as example): -// - complex + complex -// - complex + real -// - real + complex -// -// [Operator ==, !=] -// -// Each operator has three versions (taking == as example): -// - complex == complex -// - complex == real -// - real == complex -// -// Some of them are removed on C++20, but we decide to keep them -// -// [Operator <<, >>] -// -// These are implemented by casting to std::complex -// -// -// -// TODO(@zasdfgbnm): c10::complex is not currently supported, -// because: -// - lots of members and functions of c10::Half are not constexpr -// - thrust::complex only support float and double +// Given (DispatchKey::Dense, BackendComponent::CUDABit), returns +// DispatchKey::CUDA. +// See Note [The Ordering of Per-Backend Dispatch Keys Matters!] +// This function relies on the invariant that the dispatch keys between +// StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend +// in the same order as `BackendComponent`. +@Namespace("c10") public static native DispatchKey toRuntimePerBackendFunctionalityKey( + DispatchKey functionality_k, + BackendComponent backend_k); +@Namespace("c10") public static native @Cast("c10::DispatchKey") short toRuntimePerBackendFunctionalityKey( + @Cast("c10::DispatchKey") short functionality_k, + @Cast("c10::BackendComponent") byte backend_k); + // namespace c10 +// Expose the constant, but not the TYPE (DispatchKey is an implementation +// detail!) + // namespace torch +// NB: You really shouldn't use this instance; this enum is guaranteed +// to be pretty small so a regular array should be acceptable. + // namespace std +// Parsed from c10/util/Array.h +/** + * This file is based on the std::array implementation of libstdc++ at + * https://gcc.gnu.org/onlinedocs/gcc-7.1.0/libstdc++/api/a01056_source.html + * + * Changes: + * - isolate, i.e. remove dependencies on internal libstdc++ stuff + * - use c++17 behavior even in c++11 or c++14 + * - remove std::swappable special case because that doesn't work with MSVC + * - constexpr more things + * - add some features like prepend/tail + * + * If using std::array at runtime, feel free to either keep using std::array or + * use this one - it doesn't really matter. For compile time computations, this + * one here is preferred because std::array in C++11 misses some constexpr + * specifiers, forcing these methods to be called at runtime instead of compile + * time. + */ +// Copyright (C) 2007-2017 Free Software Foundation, Inc. +// +// This file is part of the GNU ISO C++ Library. This library is free +// software; you can redistribute it and/or modify it under the +// terms of the GNU General Public License as published by the +// Free Software Foundation; either version 3, or (at your option) +// any later version. - // namespace complex_literals +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. -// Define operators between integral scalars and c10::complex. std::complex does -// not support this when T is a floating-point number. This is useful because it -// saves a lot of "static_cast" when operate a complex and an integer. This -// makes the code both less verbose and potentially more efficient. 
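// [Editor's illustration, hypothetical] Per the comment above, combining the
// Dense functionality key with the CUDA backend bit yields the runtime key
// DispatchKey::CUDA, and toFunctionalityKey (declared earlier) goes the other
// way. A sketch, assuming both are static methods on the global torch class:
import org.bytedeco.pytorch.global.torch;
import org.bytedeco.pytorch.global.torch.BackendComponent;
import org.bytedeco.pytorch.global.torch.DispatchKey;

public class PerBackendKeySketch {
    public static void main(String[] args) {
        DispatchKey runtime = torch.toRuntimePerBackendFunctionalityKey(
                DispatchKey.Dense, BackendComponent.CUDABit);
        System.out.println(runtime);                                    // expected: CUDA
        System.out.println(torch.toFunctionalityKey(DispatchKey.CUDA)); // expected: Dense
    }
}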
-// #define COMPLEX_INTEGER_OP_TEMPLATE_CONDITION -// typename std::enable_if_t< -// std::is_floating_point::value && std::is_integral::value, -// int> = 0 +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. -// #undef COMPLEX_INTEGER_OP_TEMPLATE_CONDITION +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . +// #pragma once +// #include +// #include +// #include +// #include +// #include +@Namespace("c10::guts::detail") public static native void __throw_out_of_range(@StdString BytePointer msg); +@Namespace("c10::guts::detail") public static native void __throw_out_of_range(@StdString String msg); + // namespace detail +// #if defined(__cpp_deduction_guides) && __cpp_deduction_guides >= 201606 +// #endif +// Array comparisons. + // namespace detail +// Specialized algorithms. +/** + * Some added features not available in std::array. + * Only call these at compile time, they're slow if called at runtime. + * Examples: + * tail({2, 3, 4}) == {3, 4} + * prepend(2, {3, 4}) == {2, 3, 4} + */ + // namespace detail + // namespace detail +/** + * Convert a C array into a std::array. + * Example: + * int source[3] = {2, 3, 4}; + * std::array target = to_std_array(source); + */ + // namespace detail + // namespace guts + // namespace c10 +// Parsed from c10/util/TypeTraits.h +// #pragma once +// #include +// #include - // namespace c10 +/** + * is_equality_comparable is true_type iff the equality operator is defined + * for T. + */ -// std functions -// -// The implementation of these functions also follow the design of C++20 +/** + * is_hashable is true_type iff std::hash is defined for T + */ -// #if defined(USE_ROCM) -// #else -// #define ROCm_Bug(x) x -// #endif +/** + * is_function_type is true_type iff T is a plain function type (i.e. + * "Result(Args...)") + */ -// #undef ROCm_Bug +/** + * is_instantiation_of is true_type iff I is a template instantiation of T + * (e.g. vector is an instantiation of vector) Example: + * is_instantiation_of_t> // true + * is_instantiation_of_t> // true + * is_instantiation_of_t> // false + */ +/** + * strip_class: helper to remove the class type from pointers to {@code operator()}. + */ + // namespace detail -// For std::conj, there are other versions of it: -// constexpr std::complex conj( float z ); -// template< class DoubleOrInteger > -// constexpr std::complex conj( DoubleOrInteger z ); -// constexpr std::complex conj( long double z ); -// These are not implemented -// TODO(@zasdfgbnm): implement them as c10::conj +/** + * Evaluates to true_type, iff the given class is a Functor + * (i.e. has a call operator with some set of arguments) + */ -// Thrust does not have complex --> complex version of thrust::proj, -// so this function is not implemented at c10 right now. -// TODO(@zasdfgbnm): implement it by ourselves +/** + * lambda_is_stateless is true iff the lambda type T is stateless + * (i.e. does not have a closure). 
+ * Example: + * auto stateless_lambda = [] (int a) {return a;}; + * lambda_is_stateless // true + * auto stateful_lambda = [&] (int a) {return a;}; + * lambda_is_stateless // false + */ +// implementation idea: According to the C++ standard, stateless lambdas are +// convertible to function pointers -// There is no c10 version of std::polar, because std::polar always -// returns std::complex. Use c10::polar instead; +// case where LambdaType is not even a functor +// case where LambdaType is a functor + // namespace detail - // namespace std +/** + * is_type_condition is true_type iff C<...> is a type trait representing a + * condition (i.e. has a constexpr static bool ::value member) Example: + * is_type_condition // true + */ +/** + * is_fundamental is true_type iff the lambda type T is a fundamental type + * (that is, arithmetic type, void, or nullptr_t). Example: is_fundamental + * // true We define it here to resolve a MSVC bug. See + * https://github.com/pytorch/pytorch/issues/30932 for details. + */ + // namespace guts // namespace c10 -// #define C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H -// math functions are included in a separate file -// #include // IWYU pragma: keep -// utilities for complex types -// #include // IWYU pragma: keep -// #undef C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H - -// Parsed from c10/util/Half.h +// Parsed from c10/util/TypeList.h // #pragma once -/** Defines the Half type (half-precision floating-point) including conversions - * to standard C types and basic arithmetic operations. Note that arithmetic - * operations are implemented by converting to floating point and - * performing the operation in float32, instead of using CUDA half intrinsics. - * Most uses of this type within ATen are memory bound, including the - * element-wise kernels, and the half intrinsics aren't efficient on all GPUs. - * If you are writing a compute bound kernel, you can use the CUDA half - * intrinsics directly on the Half type from device code. */ - -// #include // #include -// #include -// #include -// #include - -// #if defined(__cplusplus) && (__cplusplus >= 201103L) -// #include -// #include -// #elif !defined(__OPENCL_VERSION__) -// #include -// #include -// #endif +// #include -// #ifdef _MSC_VER -// #include -// #endif +/** + * Type holding a list of types for compile time type computations + */ -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +/** + * Returns the number of types in a typelist + * Example: + * 3 == size>::value + */ -// #ifdef __CUDACC__ -// #include -// #endif +/** + * Transforms a list of types into a tuple holding these types. + * Example: + * std::tuple == to_tuple_t> + */ -// #ifdef __HIPCC__ -// #include -// #endif +/** + * Creates a typelist containing the types of a given tuple. + * Example: + * typelist == from_tuple_t> + */ -// #if defined(CL_SYCL_LANGUAGE_VERSION) -// #include // for SYCL 1.2.1 -// #elif defined(SYCL_LANGUAGE_VERSION) -// #include // for SYCL 2020 -// #endif +/** + * Concatenates multiple type lists. + * Example: + * typelist == concat_t, + * typelist> + */ -// Standard check for compiling CUDA with clang -// #if defined(__clang__) && defined(__CUDA__) && defined(__CUDA_ARCH__) -// #define C10_DEVICE_HOST_FUNCTION __device__ __host__ -// #else -// #define C10_DEVICE_HOST_FUNCTION -// #endif +/** + * Filters the types in a type list by a type trait. 
+ * Examples: + * typelist == filter_t> + */ -// #include // operator typeid +/** + * Counts how many types in the list fulfill a type trait + * Examples: + * 2 == count_if> + */ -@Namespace("c10::detail") public static native float fp32_from_bits(@Cast("uint32_t") int w); +/** + * Checks if a typelist contains a certain type. + * Examples: + * contains, string> == true_type + * contains, double> == false_type + */ + // namespace detail -@Namespace("c10::detail") public static native @Cast("uint32_t") int fp32_to_bits(float f); +/** + * Returns true iff the type trait is true for all types in the type list + * Examples: + * true == all>::value false == all>::value + */ -/* - * Convert a 16-bit floating-point number in IEEE half-precision format, in bit - * representation, to a 32-bit floating-point number in IEEE single-precision - * format, in bit representation. - * - * @note The implementation doesn't use any floating-point operations. +/** + * Returns true iff the type trait is true for any type in the type list + * Examples: + * true == true_for_any_type>::value false == + * true_for_any_type>::value */ -@Namespace("c10::detail") public static native @Cast("uint32_t") int fp16_ieee_to_fp32_bits(@Cast("uint16_t") short h); -/* - * Convert a 16-bit floating-point number in IEEE half-precision format, in bit - * representation, to a 32-bit floating-point number in IEEE single-precision - * format. - * - * @note The implementation relies on IEEE-like (no assumption about rounding - * mode and no operations on denormals) floating-point operations and bitcasts - * between integer and floating-point variables. +/** + * Maps types of a type list using a type trait + * Example: + * typelist == map_t> */ -@Namespace("c10::detail") public static native float fp16_ieee_to_fp32_value(@Cast("uint16_t") short h); -/* - * Convert a 32-bit floating-point number in IEEE single-precision format to a - * 16-bit floating-point number in IEEE half-precision format, in bit - * representation. - * - * @note The implementation relies on IEEE-like (no assumption about rounding - * mode and no operations on denormals) floating-point operations and bitcasts - * between integer and floating-point variables. +/** + * Returns the first element of a type list. + * Example: + * int == head_t> */ -@Namespace("c10::detail") public static native @Cast("uint16_t") short fp16_ieee_from_fp32_value(float f); +/** + * Returns the first element of a type list, or the specified default if the + * type list is empty. Example: int == head_t> + * bool == head_t> + */ -// Targeting ../Half.java +/** + * Returns the N-th element of a type list. + * Example: + * int == element_t<1, typelist> + */ +/** Base template. */ +/** Successful case, we have reached the zero index and can "return" the head + * type. */ -// TODO : move to complex.h +/** Error case, we have an index but ran out of types! It will only be selected + * if {@code Ts...} is actually empty! */ -// In some versions of MSVC, there will be a compiler error when building. -// C4146: unary minus operator applied to unsigned type, result still unsigned -// C4804: unsafe use of type 'bool' in operation -// It can be addressed by disabling the following warning. -// #ifdef _MSC_VER -// #pragma warning(push) -// #pragma warning(disable : 4146) -// #pragma warning(disable : 4804) -// #pragma warning(disable : 4018) -// #endif +/** Shave off types until we hit the <0, Head, Tail...> or case. 
*/ -// The overflow checks may involve float to int conversion which may -// trigger precision loss warning. Re-enable the warning once the code -// is fixed. See T58053069. -// #ifdef __clang__ -// #pragma GCC diagnostic push -// #pragma GCC diagnostic ignored "-Wunknown-warning-option" -// #pragma GCC diagnostic ignored "-Wimplicit-int-float-conversion" -// #endif +/** Convenience alias. */ -// bool can be converted to any type. -// Without specializing on bool, in pytorch_linux_trusty_py2_7_9_build: -// `error: comparison of constant '255' with boolean expression is always false` -// for `f > limit::max()` below +/** + * Returns the last element of a type list. + * Example: + * int == last_t> + */ -// skip isnan and isinf check for integral types +/** + * Take/drop a number of arguments from a typelist. + * Example: + * typelist == take_t, 2> + * typelist == drop_t, 2> + */ + // namespace detail -// #ifdef __clang__ -// #pragma GCC diagnostic pop -// #endif +/** + * Like drop, but returns an empty list rather than an assertion error if {@code num} + * is larger than the size of the TypeList. + * Example: + * typelist<> == drop_if_nonempty_t, 2> + * typelist<> == drop_if_nonempty_t, 3> + */ -// #ifdef _MSC_VER -// #pragma warning(pop) -// #endif +/** + * Reverses a typelist. + * Example: + * typelist == reverse_t> + */ +/** + * Find the index of the first type in a typelist fulfilling a type trait + * condition. Example: + * + * 2 == find_if, std::is_reference>::value + */ +/** + * Maps a list of types into a list of values. + * Examples: + * // Example 1 + * auto sizes = + * map_types_to_values>( + * [] (auto t) { return sizeof(decltype(t)::type); } + * ); + * // sizes == std::tuple{8, 1, 4} + * + * // Example 2 + * auto shared_ptrs = + * map_types_to_values>( + * [] (auto t) { return make_shared(); } + * ); + * // shared_ptrs == std::tuple, shared_ptr>() + */ + // namespace detail + // namespace typelist + // namespace guts // namespace c10 -// #include // IWYU pragma: keep - -// Parsed from c10/util/qint32.h +// Parsed from c10/core/DispatchKeySet.h // #pragma once -// #include - -// #include -// Targeting ../qint32.java +// #include +// #include +// #include +// #include +// #include +// Targeting ../FunctionalityOffsetAndMask.java - // namespace c10 +@Namespace("c10") public static native @ByVal @Cast("std::array*") FunctionalityOffsetAndMask initializeFunctionalityOffsetsAndMasks(); +@Namespace("c10") public static native @Cast("const std::array*") @ByRef FunctionalityOffsetAndMask offsetsAndMasks(); +// Targeting ../DispatchKeySet.java -// Parsed from c10/util/qint8.h -// #pragma once -// #include -// #include -// Targeting ../qint8.java +@Namespace("c10") public static native @StdString BytePointer toString(@ByVal DispatchKeySet arg0); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer arg0, @ByVal DispatchKeySet arg1); +@Namespace("c10") public static native int getDispatchTableIndexForDispatchKey(DispatchKey k); +@Namespace("c10") public static native int getDispatchTableIndexForDispatchKey(@Cast("c10::DispatchKey") short k); +// Alias key DispatchKey::Autograd maps to +// (autograd_dispatch_keyset x full_backend_mask) +// NB: keys in this set also get associated with CompositeImplicitAutograd +// +// Note [autograd_dispatch_keyset Does Not Include Backend Bits] +// We don't want to include any backend bits (BackendComponent::CPUBit, etc) +// directly in autograd_dispatch_keyset. 
+// Why? keysets like autograd_dispatch_keyset are commonly used to remove +// autograd keys from a DispatchKeySet throughout the code base. However, you +// are only allowed to remove functionality bits from a keyset, not backend +// bits. See Note [Removing keys from DispatchKeySet Only Affects Functionality +// Keys] for details. To be consistent and avoid confusion, we're explicitly +// setting up autograd_dispatch_keyset to not have any backend bits. +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet autograd_dispatch_keyset();  +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet autocast_dispatch_keyset();  +// See Note [TLS Initialization] +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet default_included_set();  +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet default_excluded_set();  +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet autograd_dispatch_keyset_with_ADInplaceOrView();  +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet python_ks();  +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet sparse_ks();  +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet sparse_csr_ks();  +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet mkldnn_ks();  +// backend dispatch keys that map to DispatchKey::AutogradOther +// NB: keys in this set also get associated with CompositeImplicitAutograd +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet autogradother_backends();  +// The set of dispatch keys that come after autograd +// n.b. this relies on the fact that AutogradOther is currently the lowest +// Autograd key +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet after_autograd_keyset();  +// The set of dispatch keys that come after ADInplaceOrView +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet after_ADInplaceOrView_keyset();  +// The set of dispatch keys that come after Functionalize +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet after_func_keyset();  +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet backend_bitset_mask();  +// keyset corresponding to functorch keys that have their own dedicated +// TensorImpl subclass.
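// [Editor's illustration, hypothetical] The pre-built keysets above are
// ordinary DispatchKeySet values, so membership can be queried from Java,
// assuming the targeted DispatchKeySet class maps c10::DispatchKeySet::has:
import org.bytedeco.pytorch.DispatchKeySet;
import org.bytedeco.pytorch.global.torch;
import org.bytedeco.pytorch.global.torch.DispatchKey;

public class KeysetSketch {
    public static void main(String[] args) {
        DispatchKeySet autogradKs = torch.autograd_dispatch_keyset();
        // Functionality bits only: per the note above, backend bits are
        // deliberately absent from this keyset.
        System.out.println(autogradKs.has(DispatchKey.AutogradFunctionality));
    }
}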
-// #if defined(__CUDACC__) && !defined(USE_ROCM) -// #endif +// This keyset has: +// (1) the functionality bits corresponding to backends (dense, sparse, +// quantized) +// (2) all of the backend bits set +@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet backend_functionality_keys();  -// #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) -// #endif -@Namespace("c10::detail") public static native float f32_from_bits(@Cast("uint16_t") short src);  +// true if t is a backend dispatch key +@Namespace("c10") public static native @Cast("bool") boolean isBackendDispatchKey(DispatchKey t); +@Namespace("c10") public static native @Cast("bool") boolean isBackendDispatchKey(@Cast("c10::DispatchKey") short t);  -@Namespace("c10::detail") public static native @Cast("uint16_t") short bits_from_f32(float src);  +// Resolve alias dispatch key to DispatchKeySet if applicable +@Namespace("c10") public static native @ByVal DispatchKeySet getRuntimeDispatchKeySet(DispatchKey t); +@Namespace("c10") public static native @ByVal DispatchKeySet getRuntimeDispatchKeySet(@Cast("c10::DispatchKey") short t);  -@Namespace("c10::detail") public static native @Cast("uint16_t") short round_to_nearest_even(float src);  +// Resolve alias dispatch key to DispatchKeySet if applicable, +// and check if k is a part of that set +@Namespace("c10") public static native @Cast("bool") boolean runtimeDispatchKeySetHas(DispatchKey t, DispatchKey k); +@Namespace("c10") public static native @Cast("bool") boolean runtimeDispatchKeySetHas(@Cast("c10::DispatchKey") short t, @Cast("c10::DispatchKey") short k);  -// Targeting ../BFloat16.java  +// Returns a DispatchKeySet of all backend keys mapped to Autograd dispatch key +// t; the DispatchKeySet is empty if t is not an alias of DispatchKey::Autograd. +@Namespace("c10") public static native @ByVal DispatchKeySet getBackendKeySetFromAutograd(DispatchKey t); +@Namespace("c10") public static native @ByVal DispatchKeySet getBackendKeySetFromAutograd(@Cast("c10::DispatchKey") short t); + +// Returns a DispatchKeySet of autograd related keys mapped to backend. +// for a given backend key, use the associated autograd key. +// for non-backend keys, use AutogradOther as a default. +// Note: it's convenient and fast to return a default here rather than (say) +// returning an optional, or throwing. But it makes callers +// responsible for either a) enforcing the invariant that only backend keys +// be passed as arguments, or b) interpreting our return value carefully. +@Namespace("c10") public static native @ByVal DispatchKeySet getAutogradRelatedKeySetFromBackend(BackendComponent t); +@Namespace("c10") public static native @ByVal DispatchKeySet getAutogradRelatedKeySetFromBackend(@Cast("c10::BackendComponent") byte t);  +// Returns a DispatchKeySet of autocast related keys mapped to backend. +@Namespace("c10") public static native @ByVal DispatchKeySet getAutocastRelatedKeySetFromBackend(BackendComponent t); +@Namespace("c10") public static native @ByVal DispatchKeySet getAutocastRelatedKeySetFromBackend(@Cast("c10::BackendComponent") byte t);  +// returns the "backend" DispatchKey of highest priority in the set.
+// This is basically like highestBackendKey(), except that we have some +// "functionality" bits that correspond to backends (Sparse, Quantized) +@Namespace("c10") public static native DispatchKey highestPriorityBackendTypeId(@ByVal DispatchKeySet ks); - // namespace c10 +// This API exists because we have a use case for checking +// getRuntimeDispatchKeySet(alias).has(DispatchKey::Undefined) +// in OperatorEntry.cpp but we disallow it in has() API. +@Namespace("c10") public static native @Cast("bool") boolean isIncludedInAlias(DispatchKey k, DispatchKey alias); +@Namespace("c10") public static native @Cast("bool") boolean isIncludedInAlias(@Cast("c10::DispatchKey") short k, @Cast("c10::DispatchKey") short alias); -// #include // IWYU pragma: keep +// Historically, every tensor only had a single DispatchKey, and it was always +// something like CPU, and there wasn't any of this business where TLS +// could cause the DispatchKey of a tensor to change. But we still have some +// legacy code that is still using DispatchKey for things like instanceof +// checks; if at all possible, refactor the code to stop using DispatchKey in +// those cases. +@Namespace("c10") public static native DispatchKey legacyExtractDispatchKey(@ByVal DispatchKeySet s); + +// Given a function type, constructs a function_traits type that drops the first +// parameter type if the first parameter is of type DispatchKeySet. NB: +// DispatchKeySet is currently explicitly hidden from JIT (mainly to avoid +// pushing unnecessary arguments on the stack - see Note [ Plumbing Keys Through +// the Dispatcher] for details). If at any point in the future we need to expose +// this type to JIT, revisit the usage of this type alias. + // namespace c10 -// Parsed from c10/util/quint2x4.h +// Parsed from c10/core/Backend.h // #pragma once -// #include -// #include -// Targeting ../quint2x4.java +// #include +// #include +// #include +// #include +// #include +/** + * This legacy enum class defines the set of backends supported by old school, + * code generated Type-based ATen. A "backend" in this sense roughly + * corresponds to the cartesian product of (device type, layout), but restricted + * only to combinations which we actually have kernels for. Backend does NOT + * include dtype. + * + * The reason we are sunsetting this enum class is because it doesn't allow for + * open registration; e.g., if you want to add SparseXLA, you'd have to + * edit this enum; you wouldn't be able to do it out of tree. DispatchKey is + * the replacement for Backend which supports open registration. + * + * NB: The concept of 'Backend' here disagrees with the notion of backend + * exposed to users in torch.backends. Backend here is something like "CPU" + * or "SparseCUDA"; backend in torch.backends is something like "MKL" or + * "CUDNN". 
+ */ +@Namespace("c10") public enum Backend { + CPU(0), + CUDA(1), + HIP(2), + VE(3), + FPGA(4), + IPU(5), + XPU(6), + SparseCPU(7), + SparseCUDA(8), + SparseCsrCPU(9), + SparseCsrCUDA(10), + SparseHIP(11), + SparseVE(12), + SparseXPU(13), + ORT(14), + XLA(15), + Vulkan(16), + Metal(17), + Meta(18), + QuantizedCPU(19), + QuantizedCUDA(20), + QuantizedXPU(21), + Undefined(22), + MkldnnCPU(23), + MPS(24), + HPU(25), + Lazy(26), + MTIA(27), + PrivateUse1(28), + NumOptions(29); - // namespace c10 + public final int value; + private Backend(int v) { this.value = v; } + private Backend(Backend e) { this.value = e.value; } + public Backend intern() { for (Backend e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +@Namespace("c10") public static native Backend dispatchKeyToBackend(DispatchKey t); +@Namespace("c10") public static native @Cast("c10::Backend") int dispatchKeyToBackend(@Cast("c10::DispatchKey") short t); -// Parsed from c10/util/quint4x2.h +@Namespace("c10") public static native DispatchKey backendToDispatchKey(Backend b); +@Namespace("c10") public static native @Cast("c10::DispatchKey") short backendToDispatchKey(@Cast("c10::Backend") int b); -// #pragma once -// #include +@Namespace("c10") public static native DeviceType backendToDeviceType(Backend b); +@Namespace("c10") public static native @Cast("c10::DeviceType") byte backendToDeviceType(@Cast("c10::Backend") int b); -// #include -// Targeting ../quint4x2.java +// TODO: This probably shouldn't actually be static inline +@Namespace("c10") public static native @Cast("const char*") BytePointer toString(Backend b); +@Namespace("c10") public static native String toString(@Cast("c10::Backend") int b); +@Namespace("c10") public static native @Cast("bool") boolean isSparse(Backend b); +@Namespace("c10") public static native @Cast("bool") boolean isSparse(@Cast("c10::Backend") int b); +@Namespace("c10") public static native @Cast("bool") boolean isSparseCsr(Backend b); +@Namespace("c10") public static native @Cast("bool") boolean isSparseCsr(@Cast("c10::Backend") int b); // namespace c10 -// Parsed from c10/util/ThreadLocalDebugInfo.h +// Parsed from c10/core/Layout.h // #pragma once -// #include - -// #include -// #include - -@Namespace("c10") public enum DebugInfoKind { - PRODUCER_INFO((byte)(0)), - MOBILE_RUNTIME_INFO((byte)(1)), - PROFILER_STATE((byte)(2)), - INFERENCE_CONTEXT((byte)(3)), // for inference usage - PARAM_COMMS_INFO((byte)(4)), +// #include +// #include - TEST_INFO((byte)(5)), // used only in tests - TEST_INFO_2((byte)(6));// used only in tests +// #include +@Namespace("c10") public enum Layout { + Strided((byte)(0)), + Sparse((byte)(1)), + SparseCsr((byte)(2)), + Mkldnn((byte)(3)), + SparseCsc((byte)(4)), + SparseBsr((byte)(5)), + SparseBsc((byte)(6)), + NumOptions((byte)(7)); public final byte value; - private DebugInfoKind(byte v) { this.value = v; } - private DebugInfoKind(DebugInfoKind e) { this.value = e.value; } - public DebugInfoKind intern() { for (DebugInfoKind e : values()) if (e.value == value) return e; return this; } + private Layout(byte v) { this.value = v; } + private Layout(Layout e) { this.value = e.value; } + public Layout intern() { for (Layout e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } -// Targeting ../DebugInfoBase.java +@Namespace("c10") public static native Layout layout_from_backend(Backend backend); +@Namespace("c10") public static 
native @Cast("c10::Layout") byte layout_from_backend(@Cast("c10::Backend") int backend); -// Targeting ../ThreadLocalDebugInfo.java +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @ByVal Layout layout); + // namespace c10 -// Targeting ../DebugInfoGuard.java +// Parsed from c10/util/AlignOf.h +//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the AlignedCharArray and AlignedCharArrayUnion classes. +// +//===----------------------------------------------------------------------===// - // namespace c10 +// ATen: modified from llvm::AlignOf +// replaced LLVM_ALIGNAS with alignas +// #pragma once -// Parsed from c10/util/Type.h +// #include -// #ifndef C10_UTIL_TYPE_H_ -// #define C10_UTIL_TYPE_H_ +/** \struct AlignedCharArray + * \brief Helper for building an aligned character array type. + * + * This template is used to explicitly build up a collection of aligned + * character array types. We have to build these up using a macro and explicit + * specialization to cope with MSVC (at least till 2015) where only an + * integer literal can be used to specify an alignment constraint. Once built + * up here, we can then begin to indirect between these using normal C++ + * template parameters. */ -// #include -// #include -// #include +// MSVC requires special handling here. +// #ifndef _MSC_VER -// #include +// #else // _MSC_VER -/** Utility to demangle a C++ symbol name. */ -@Namespace("c10") public static native @StdString BytePointer demangle(@Cast("const char*") BytePointer name); -@Namespace("c10") public static native @StdString String demangle(String name); +/** \brief Create a type with an aligned char buffer. */ -/** Returns the printable name of the type. */ +// We provide special variations of this template for the most common +// alignments because __declspec(align(...)) doesn't actually work when it is +// a member of a by-value function argument in MSVC, even if the alignment +// request is something reasonably like 8-byte or 16-byte. Note that we can't +// even include the declspec with the union that forces the alignment because +// MSVC warns on the existence of the declspec despite the union member forcing +// proper alignment. - // namespace c10 +// The rest of these are provided with a __declspec(align(...)) and we simply +// can't pass them by-value as function arguments on MSVC. -// #endif // C10_UTIL_TYPE_H_ +// #define AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) +// template +// struct AlignedCharArray { +// __declspec(align(x)) char buffer[Size]; +// }; + +// #undef AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT +// #endif // _MSC_VER + // end namespace detail -// Parsed from c10/util/TypeCast.h +/** \brief This union template exposes a suitably aligned and sized character + * array member which can hold elements of any of up to ten types. + * + * These types may be arrays, structs, or any other types. The goal is to + * expose a char array buffer member which can be used as suitable storage for + * a placement new of any of these types. Support for more than ten types can + * be added at the cost of more boilerplate. 
*/ + // end namespace c10 + + +// Parsed from c10/util/SmallVector.h + +//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the SmallVector class. +// +//===----------------------------------------------------------------------===// + +// ATen: modified from llvm::SmallVector. +// used std::is_trivially_{copy,move}_constructible +// replaced iterator_range constructor with inline Container&& constructor +// replaced LLVM_NODISCARD, LLVM_LIKELY, and LLVM_UNLIKELY with c10 equivalents +// removed LLVM_GSL_OWNER +// added SmallVector::at +// added operator<< for std::ostream +// added C10_API to export SmallVectorBase // #pragma once + // #include -// #include -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include // #include +// #include -// #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") -// #endif -// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") // #endif +// Targeting ../IntSizedSmallVectorBase.java -// Note: deliberately ignores undefined behavior, consistent with NumPy. -// PyTorch's type conversions can cause a variety of undefined behavior, -// including float to integral overflow and signed to unsigned integer overflow. -// Some of this undefined behavior is addressed below. -// Targeting ../static_cast_with_inter_type.java +/** Figure out the offset of the first element. */ +// Targeting ../SymIntSmallVectorCommon.java -// Define separately to avoid being inlined and prevent code-size bloat -@Namespace("c10") public static native void report_overflow(@Cast("const char*") BytePointer name); -@Namespace("c10") public static native void report_overflow(String name); - // namespace c10 +// Targeting ../LongSmallVectorCommon.java -// Trigger tests for D25440771. TODO: Remove this line any time you want. +// Targeting ../NodeSmallVectorCommon.java -// Parsed from c10/util/Registry.h -// #ifndef C10_UTIL_REGISTRY_H_ -// #define C10_UTIL_REGISTRY_H_ +// Targeting ../TreeRefSmallVectorCommon.java -/** - * Simple registry implementation that uses static variables to - * register object creators during program initialization time. - */ -// NB: This Registry works poorly when you have other namespaces. -// Make all macro invocations from inside the at namespace. 
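Looking back at the Backend and Layout hunks above: the new bindings expose the c10 mapping helpers as plain static natives. A minimal sketch of how they might be called from Java, assuming, as in other releases of these presets, that the generated enums and natives are static members of org.bytedeco.pytorch.global.torch (the expected values in the comments follow the c10 definitions):

import static org.bytedeco.pytorch.global.torch.*;

public class BackendMappingSketch {
  public static void main(String[] args) {
    // Round-trip the legacy Backend enum through its DispatchKey.
    DispatchKey key = backendToDispatchKey(Backend.SparseCPU);
    Backend back = dispatchKeyToBackend(key);

    // A Backend value encodes both a device type and a layout.
    DeviceType device = backendToDeviceType(Backend.SparseCPU); // expected: CPU
    Layout layout = layout_from_backend(Backend.SparseCPU);     // expected: Sparse

    System.out.println(back + ": device=" + device + ", layout=" + layout
        + ", sparse=" + isSparse(Backend.SparseCPU));
  }
}

Each helper also has a primitive twin (short/int/byte parameters and return types), as the paired @Cast declarations above show, for callers that work with raw enum values instead of the Java enum constants.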
+// Targeting ../SymIntSmallVectorBase.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../LongSmallVectorBase.java -@Namespace("c10") public static native @StdString BytePointer KeyStrRepr(@StdString BytePointer key); -@Namespace("c10") public static native @StdString String KeyStrRepr(@StdString String key); -@Namespace("c10") public enum RegistryPriority { - REGISTRY_FALLBACK(1), - REGISTRY_DEFAULT(2), - REGISTRY_PREFERRED(3); +// Targeting ../NodeSmallVectorBase.java - public final int value; - private RegistryPriority(int v) { this.value = v; } - private RegistryPriority(RegistryPriority e) { this.value = e.value; } - public RegistryPriority intern() { for (RegistryPriority e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -/** - * \brief A template class that allows one to register classes by keys. - * - * The keys are usually a std::string specifying the name, but can be anything - * that can be used in a std::map. - * - * You should most likely not use the Registry class explicitly, but use the - * helper macros below to declare specific registries as well as registering - * objects. - */ +// Targeting ../TreeRefSmallVectorBase.java -/** - * C10_DECLARE_TYPED_REGISTRY is a macro that expands to a function - * declaration, as well as creating a convenient typename for its corresponding - * registerer. - */ -// Note on C10_IMPORT and C10_EXPORT below: we need to explicitly mark DECLARE -// as import and DEFINE as export, because these registry macros will be used -// in downstream shared libraries as well, and one cannot use *_API - the API -// macro will be defined on a per-shared-library basis. Semantically, when one -// declares a typed registry it is always going to be IMPORT, and when one -// defines a registry (which should happen ONLY ONCE and ONLY IN SOURCE FILE), -// the instantiation unit is always going to be exported. -// -// The only unique condition is when in the same file one does DECLARE and -// DEFINE - in Windows compilers, this generates a warning that dllimport and -// dllexport are mixed, but the warning is fine and linker will be properly -// exporting the symbol. Same thing happens in the gflags flag declaration and -// definition caes. -// #define C10_DECLARE_TYPED_REGISTRY( -// RegistryName, SrcType, ObjectType, PtrType, ...) -// C10_IMPORT ::c10::Registry, ##__VA_ARGS__>* -// RegistryName(); -// typedef ::c10::Registerer, ##__VA_ARGS__> -// Registerer##RegistryName -// #define C10_DEFINE_TYPED_REGISTRY( -// RegistryName, SrcType, ObjectType, PtrType, ...) -// C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* -// RegistryName() { -// static ::c10::Registry, ##__VA_ARGS__>* -// registry = new ::c10:: -// Registry, ##__VA_ARGS__>(); -// return registry; -// } -// #define C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( -// RegistryName, SrcType, ObjectType, PtrType, ...) -// C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* -// RegistryName() { -// static ::c10::Registry, ##__VA_ARGS__>* -// registry = -// new ::c10::Registry, ##__VA_ARGS__>( -// false); -// return registry; -// } +// Define this out-of-line to dissuade the C++ compiler from inlining it. -// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated -// creator with comma in its templated arguments. -// #define C10_REGISTER_TYPED_CREATOR(RegistryName, key, ...) 
-// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( -// key, RegistryName(), ##__VA_ARGS__); -// #define C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( -// RegistryName, key, priority, ...) -// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( -// key, priority, RegistryName(), ##__VA_ARGS__); +// Define this out-of-line to dissuade the C++ compiler from inlining it. -// #define C10_REGISTER_TYPED_CLASS(RegistryName, key, ...) -// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( -// key, -// RegistryName(), -// Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, -// ::c10::demangle_type<__VA_ARGS__>()); -// #define C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( -// RegistryName, key, priority, ...) -// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( -// key, -// priority, -// RegistryName(), -// Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, -// ::c10::demangle_type<__VA_ARGS__>()); +// Define this out-of-line to dissuade the C++ compiler from inlining it. -// C10_DECLARE_REGISTRY and C10_DEFINE_REGISTRY are hard-wired to use -// std::string as the key type, because that is the most commonly used cases. -// #define C10_DECLARE_REGISTRY(RegistryName, ObjectType, ...) -// C10_DECLARE_TYPED_REGISTRY( -// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) -// #define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) -// C10_DEFINE_TYPED_REGISTRY( -// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) +/** SmallVectorTemplateBase - This is where we put + * method implementations that are designed to work with trivially copyable + * T's. This allows using memcpy in place of copy/move construction and + * skipping destruction. */ +// Targeting ../SymIntSmallVectorImpl.java -// #define C10_DEFINE_REGISTRY_WITHOUT_WARNING(RegistryName, ObjectType, ...) -// C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( -// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) -// #define C10_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) -// C10_DECLARE_TYPED_REGISTRY( -// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) +// Targeting ../LongSmallVectorImpl.java -// #define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) -// C10_DEFINE_TYPED_REGISTRY( -// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) -// #define C10_DEFINE_SHARED_REGISTRY_WITHOUT_WARNING( -// RegistryName, ObjectType, ...) -// C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( -// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) +// Targeting ../NodeSmallVectorImpl.java -// C10_REGISTER_CREATOR and C10_REGISTER_CLASS are hard-wired to use std::string -// as the key -// type, because that is the most commonly used cases. -// #define C10_REGISTER_CREATOR(RegistryName, key, ...) -// C10_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__) -// #define C10_REGISTER_CREATOR_WITH_PRIORITY(RegistryName, key, priority, ...) -// C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( -// RegistryName, #key, priority, __VA_ARGS__) +// Targeting ../TreeRefSmallVectorImpl.java -// #define C10_REGISTER_CLASS(RegistryName, key, ...) -// C10_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__) -// #define C10_REGISTER_CLASS_WITH_PRIORITY(RegistryName, key, priority, ...) 
-// C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( -// RegistryName, #key, priority, __VA_ARGS__) - // namespace c10 -// #endif // C10_UTIL_REGISTRY_H_ -// Parsed from c10/util/Flags.h -// #ifndef C10_UTIL_FLAGS_H_ -// #define C10_UTIL_FLAGS_H_ -/* Commandline flags support for C10. - * - * This is a portable commandline flags tool for c10, so we can optionally - * choose to use gflags or a lightweight custom implementation if gflags is - * not possible on a certain platform. If you have gflags installed, set the - * macro C10_USE_GFLAGS will seamlessly route everything to gflags. - * - * To define a flag foo of type bool default to true, do the following in the - * *global* namespace: - * C10_DEFINE_bool(foo, true, "An example."); - * - * To use it in another .cc file, you can use C10_DECLARE_* as follows: - * C10_DECLARE_bool(foo); - * - * In both cases, you can then access the flag via FLAGS_foo. - * - * It is recommended that you build with gflags. To learn more about the flags - * usage, refer to the gflags page here: - * - * https://gflags.github.io/gflags/ - * - * Note about Python users / devs: gflags is initiated from a C++ function - * ParseCommandLineFlags, and is usually done in native binaries in the main - * function. As Python does not have a modifiable main function, it is usually - * difficult to change the flags after Python starts. Hence, it is recommended - * that one sets the default value of the flags to one that's acceptable in - * general - that will allow Python to run without wrong flags. - */ -// #include +/** Storage for the SmallVector elements. This is specialized for the N=0 case + * to avoid allocating unnecessary storage. */ -// #include -// #include -/** - * Sets the usage message when a commandline tool is called with "--help". - */ -@Namespace("c10") public static native void SetUsageMessage(@StdString BytePointer str); -@Namespace("c10") public static native void SetUsageMessage(@StdString String str); +/** We need the storage to be properly aligned even for small-size of 0 so that + * the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is + * well-defined. */ -/** - * Returns the usage message for the commandline tool set by SetUsageMessage. - */ -@Namespace("c10") public static native @Cast("const char*") BytePointer UsageMessage(); +/** Forward declaration of SmallVector so that + * calculateSmallVectorDefaultInlinedElements can reference + * {@code sizeof(SmallVector)}. */ -/** - * Parses the commandline flags. - * - * This command parses all the commandline arguments passed in via pargc - * and argv. Once it is finished, partc and argv will contain the remaining - * commandline args that c10 does not deal with. Note that following - * convention, argv[0] contains the binary name and is not parsed. - */ -@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(IntPointer pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); -@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(IntBuffer pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); -@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(int[] pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); +/** Helper class for calculating the default number of inline elements for + * {@code SmallVector}. + * + * This should be migrated to a constexpr function when our minimum + * compiler support is enough for multi-statement constexpr functions. 
*/ +// Targeting ../SymDimVector.java -/** - * Checks if the commandline flags has already been passed. - */ -@Namespace("c10") public static native @Cast("bool") boolean CommandLineFlagsHasBeenParsed(); - // namespace c10 +// Targeting ../DimVector.java -//////////////////////////////////////////////////////////////////////////////// -// Below are gflags and non-gflags specific implementations. -// In general, they define the following macros for one to declare (use -// C10_DECLARE) or define (use C10_DEFINE) flags: -// C10_{DECLARE,DEFINE}_{int,int64,double,bool,string} -//////////////////////////////////////////////////////////////////////////////// -// #ifdef C10_USE_GFLAGS +// Targeting ../SmallNodeVector.java -//////////////////////////////////////////////////////////////////////////////// -// Begin gflags section: most functions are basically rerouted to gflags. -//////////////////////////////////////////////////////////////////////////////// -// #include -// C10 uses hidden visibility by default. However, in gflags, it only uses -// export on Windows platform (with dllexport) but not on linux/mac (with -// default visibility). As a result, to ensure that we are always exporting -// global variables, we will redefine the GFLAGS_DLL_DEFINE_FLAG macro if we -// are building C10 as a shared libray. -// This has to be done after the inclusion of gflags, because some early -// versions of gflags.h (e.g. 2.0 on ubuntu 14.04) directly defines the -// macros, so we need to do definition after gflags is done. -// #ifdef GFLAGS_DLL_DEFINE_FLAG -// #endif // GFLAGS_DLL_DEFINE_FLAG -// #ifdef GFLAGS_DLL_DECLARE_FLAG -// #endif // GFLAGS_DLL_DECLARE_FLAG -// #define GFLAGS_DLL_DEFINE_FLAG C10_EXPORT -// #define GFLAGS_DLL_DECLARE_FLAG C10_IMPORT +// Targeting ../TreeList.java -// gflags before 2.0 uses namespace google and after 2.1 uses namespace gflags. -// Using GFLAGS_GFLAGS_H_ to capture this change. -// #ifndef GFLAGS_GFLAGS_H_ -// #endif // GFLAGS_GFLAGS_H_ -// Motivation about the gflags wrapper: -// (1) We would need to make sure that the gflags version and the non-gflags -// version of C10 are going to expose the same flags abstraction. One should -// explicitly use FLAGS_flag_name to access the flags. -// (2) For flag names, it is recommended to start with c10_ to distinguish it -// from regular gflags flags. For example, do -// C10_DEFINE_BOOL(c10_my_flag, true, "An example"); -// to allow one to use FLAGS_c10_my_flag. -// (3) Gflags has a design issue that does not properly expose the global flags, -// if one builds the library with -fvisibility=hidden. The current gflags (as of -// Aug 2018) only deals with the Windows case using dllexport, and not the Linux -// counterparts. As a result, we will explciitly use C10_EXPORT to export the -// flags defined in C10. This is done via a global reference, so the flag -// itself is not duplicated - under the hood it is the same global gflags flag. 
-// #define C10_GFLAGS_DEF_WRAPPER(type, real_type, name, default_value, help_str) -// DEFINE_##type(name, default_value, help_str); -// #define C10_DEFINE_int(name, default_value, help_str) -// C10_GFLAGS_DEF_WRAPPER(int32, gflags::int32, name, default_value, help_str) -// #define C10_DEFINE_int32(name, default_value, help_str) -// C10_DEFINE_int(name, default_value, help_str) -// #define C10_DEFINE_int64(name, default_value, help_str) -// C10_GFLAGS_DEF_WRAPPER(int64, gflags::int64, name, default_value, help_str) -// #define C10_DEFINE_double(name, default_value, help_str) -// C10_GFLAGS_DEF_WRAPPER(double, double, name, default_value, help_str) -// #define C10_DEFINE_bool(name, default_value, help_str) -// C10_GFLAGS_DEF_WRAPPER(bool, bool, name, default_value, help_str) -// #define C10_DEFINE_string(name, default_value, help_str) -// C10_GFLAGS_DEF_WRAPPER(string, ::fLS::clstring, name, default_value, help_str) +/** Given a range of type R, iterate the entire range and return a + * SmallVector with elements of the vector. This is useful, for example, + * when you want to iterate a range and then sort the results. */ -// DECLARE_typed_var should be used in header files and in the global namespace. -// #define C10_GFLAGS_DECLARE_WRAPPER(type, real_type, name) DECLARE_##type(name); + // end namespace c10 -// #define C10_DECLARE_int(name) -// C10_GFLAGS_DECLARE_WRAPPER(int32, gflags::int32, name) -// #define C10_DECLARE_int32(name) C10_DECLARE_int(name) -// #define C10_DECLARE_int64(name) -// C10_GFLAGS_DECLARE_WRAPPER(int64, gflags::int64, name) -// #define C10_DECLARE_double(name) -// C10_GFLAGS_DECLARE_WRAPPER(double, double, name) -// #define C10_DECLARE_bool(name) C10_GFLAGS_DECLARE_WRAPPER(bool, bool, name) -// #define C10_DECLARE_string(name) -// C10_GFLAGS_DECLARE_WRAPPER(string, ::fLS::clstring, name) -// Targeting ../C10FlagParser.java +/** Implement std::swap in terms of SmallVector swap. */ +/** Implement std::swap in terms of SmallVector swap. */ + // end namespace std - // namespace c10 +// Parsed from c10/util/ArrayRef.h -// The macros are defined outside the c10 namespace. In your code, you should -// write the C10_DEFINE_* and C10_DECLARE_* macros outside any namespace -// as well. +//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// -// #define C10_DEFINE_typed_var(type, name, default_value, help_str) -// C10_EXPORT type FLAGS_##name = default_value; -// namespace c10 { -// namespace { -// class C10FlagParser_##name : public C10FlagParser { -// public: -// explicit C10FlagParser_##name(const std::string& content) { -// success_ = C10FlagParser::Parse(content, &FLAGS_##name); -// } -// }; -// } -// RegistererC10FlagsRegistry g_C10FlagsRegistry_##name( -// #name, -// C10FlagsRegistry(), -// RegistererC10FlagsRegistry::DefaultCreator, -// "(" #type ", default " #default_value ") " help_str); -// } +// ATen: modified from llvm::ArrayRef. +// removed llvm-specific functionality +// removed some implicit const -> non-const conversions that rely on +// complicated std::enable_if meta-programming +// removed a bunch of slice variants for simplicity... 
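The ArrayRef instantiations targeted in this hunk are non-owning views over native memory. A hedged sketch of constructing one, assuming the generated LongArrayRef keeps ArrayRef's (data, length) constructor; LongArrayRef and LongPointer are classes of these presets, but the constructor arity is the assumption here, and the backing LongPointer must outlive the view:

import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.LongArrayRef;

public class ArrayRefSketch {
  public static void main(String[] args) {
    LongPointer data = new LongPointer(2, 3, 4, 5);  // native int64_t[4]
    LongArrayRef sizes = new LongArrayRef(data, 4);  // a view; does not copy
    System.out.println(sizes.size());                // expected: 4
  }
}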
-// #define C10_DEFINE_int(name, default_value, help_str) -// C10_DEFINE_typed_var(int, name, default_value, help_str) -// #define C10_DEFINE_int32(name, default_value, help_str) -// C10_DEFINE_int(name, default_value, help_str) -// #define C10_DEFINE_int64(name, default_value, help_str) -// C10_DEFINE_typed_var(int64_t, name, default_value, help_str) -// #define C10_DEFINE_double(name, default_value, help_str) -// C10_DEFINE_typed_var(double, name, default_value, help_str) -// #define C10_DEFINE_bool(name, default_value, help_str) -// C10_DEFINE_typed_var(bool, name, default_value, help_str) -// #define C10_DEFINE_string(name, default_value, help_str) -// C10_DEFINE_typed_var(std::string, name, default_value, help_str) +// #pragma once -// DECLARE_typed_var should be used in header files and in the global namespace. -// #define C10_DECLARE_typed_var(type, name) C10_IMPORT extern type FLAGS_##name +// #include +// #include +// #include +// #include -// #define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name) -// #define C10_DECLARE_int32(name) C10_DECLARE_int(name) -// #define C10_DECLARE_int64(name) C10_DECLARE_typed_var(int64_t, name) -// #define C10_DECLARE_double(name) C10_DECLARE_typed_var(double, name) -// #define C10_DECLARE_bool(name) C10_DECLARE_typed_var(bool, name) -// #define C10_DECLARE_string(name) C10_DECLARE_typed_var(std::string, name) +// #include +// #include +// #include +// Targeting ../ArgumentArrayRef.java -//////////////////////////////////////////////////////////////////////////////// -// End non-gflags section. -//////////////////////////////////////////////////////////////////////////////// -// #endif // C10_USE_GFLAGS +// Targeting ../ArgumentDefArrayRef.java -// #endif // C10_UTIL_FLAGS_H_ +// Targeting ../BFloat16ArrayRef.java -// Parsed from c10/util/Logging.h -// #ifndef C10_UTIL_LOGGING_H_ -// #define C10_UTIL_LOGGING_H_ +// Targeting ../BlockArrayRef.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../BoolArrayRef.java -// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off -// logging at compile time so no logging message below that level is produced -// at all. The value should be between INT_MIN and CAFFE_FATAL. -// #ifndef CAFFE2_LOG_THRESHOLD -// If we have not defined the compile time log threshold, we keep all the -// log cases. -public static native @MemberGetter int CAFFE2_LOG_THRESHOLD(); -public static final int CAFFE2_LOG_THRESHOLD = CAFFE2_LOG_THRESHOLD(); -// #endif // CAFFE2_LOG_THRESHOLD -// Below are different implementations for glog and non-glog cases. -// #ifdef C10_USE_GLOG -// #include -// #else // !C10_USE_GLOG -// #include -// #endif // C10_USE_GLOG +// Targeting ../ByteArrayRef.java +// Targeting ../DimnameArrayRef.java -// Some versions of GLOG support less-spammy version of LOG_EVERY_MS. If it's -// not available - just short-circuit to the always working one one. 
-// We define the C10_ name to avoid confusing other files -// #ifdef LOG_EVERY_MS -// #define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms) -// #else -// #define C10_LOG_EVERY_MS(severity, ms) LOG(severity) -// #endif +// Targeting ../DoubleArrayRef.java -// Same for LOG_FIRST_N -// #ifdef LOG_FIRST_N -// #define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n) -// #else -// #define C10_LOG_FIRST_N(severity, n) LOG(severity) -// #endif -// Same for LOG_EVERY_N -// #ifdef LOG_EVERY_N -// #define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n) -// #else -// #define C10_LOG_EVERY_N(severity, n) LOG(severity) -// #endif +// Targeting ../DoubleComplexArrayRef.java -// Functions that we use for initialization. -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") PointerPointer argv); -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") @ByPtrPtr BytePointer argv); -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntBuffer argc, @Cast("char**") @ByPtrPtr ByteBuffer argv); -@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(int[] argc, @Cast("char**") @ByPtrPtr byte[] argv); -@Namespace("c10") public static native void UpdateLoggingLevelsFromFlags(); -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @StdString String msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @StdString String msg); +// Targeting ../EnumNameValueArrayRef.java -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @ByVal CompileTimeEmptyString arg3); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @StdString BytePointer msg); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @StdString String msg, - @Const Pointer caller/*=nullptr*/); 
-@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @StdString String msg); +// Targeting ../FloatArrayRef.java -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - @Cast("const char*") BytePointer file, - int line, - @Cast("const char*") BytePointer condition, - @ByVal CompileTimeEmptyString arg3); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3, - @Const Pointer caller/*=nullptr*/); -@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( - String file, - int line, - String condition, - @ByVal CompileTimeEmptyString arg3); -@Namespace("c10") public static native @Cast("const bool") boolean IsUsingGoogleLogging(); +// Targeting ../FloatComplexArrayRef.java -/** - * A utility to allow one to show log info to stderr after the program starts. - * - * This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level - * to smaller than INFO. You are recommended to only use this in a few sparse - * cases, such as when you want to write a tutorial or something. Normally, use - * the commandline flags to set the log level. - */ -@Namespace("c10") public static native void ShowLogInfoToStderr(); -@Namespace("c10") public static native void SetStackTraceFetcher(@ByVal Fetcher fetcher); +// Targeting ../FuturePtrArrayRef.java -// #define CAFFE_ENFORCE(condition, ...) -// do { -// if (C10_UNLIKELY(!(condition))) { -// ::c10::ThrowEnforceNotMet( -// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); -// } -// } while (false) -// #define CAFFE_ENFORCE_FINITE(condition, ...) -// do { -// if (C10_UNLIKELY(!(condition))) { -// ::c10::ThrowEnforceFiniteNotMet( -// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); -// } -// } while (false) +// Targeting ../HalfArrayRef.java -// #define CAFFE_ENFORCE_WITH_CALLER(condition, ...) -// do { -// if (C10_UNLIKELY(!(condition))) { -// ::c10::ThrowEnforceNotMet( -// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); -// } -// } while (false) -// #define CAFFE_THROW(...) -// ::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__)) +// Targeting ../IValueArrayRef.java -/** - * Rich logging messages - * - * CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that - * capture input argument values and add it to the exception message. E.g. - * {@code CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")} - * would evaluate both foo and bar only once and if the results are not equal - - * include them in the exception message. - * - * Some of the basic checker functions like Equals or Greater are already - * defined below. Other header might define customized checkers by adding - * functions to caffe2::enforce_detail namespace. 
For example: - * - * namespace caffe2 { namespace enforce_detail { - * inline EnforceFailMessage IsVector(const vector& shape) { - * if (shape.size() == 1) { return EnforceOK(); } - * return c10::str("Shape ", shape, " is not a vector"); - * } - * }} - * - * With further usages like {@code CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))} - * - * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided - * too. Please use them instead of TORCH_CHECK_EQ and friends for failures in - * user-provided input. - */ -// #define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) -// ::c10::enforce_detail::enforceThatImpl( -// op, lhs, rhs, __FILE__, __LINE__, expr, nullptr, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) -// ::c10::enforce_detail::enforceThatImpl( -// op, (lhs), (rhs), __FILE__, __LINE__, expr, this, ##__VA_ARGS__) +// Targeting ../IntArrayRef.java - // namespace enforce_detail -// #define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) -// CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__) +// Targeting ../TagArrayRef.java -// #define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) -// CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_EQ(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::equal_to(), ==, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_NE(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::not_equal_to(), !=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LE(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::less_equal(), <=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LT(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::less(), <, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GE(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::greater_equal(), >=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GT(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP(std::greater(), >, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) -// CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER( -// cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::equal_to(), ==, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::not_equal_to(), !=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::less_equal(), <=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less(), <, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::greater_equal(), >=, x, y, ##__VA_ARGS__) -// #define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) -// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( -// std::greater(), >, x, y, ##__VA_ARGS__) +// Targeting ../LongArrayRef.java -/** - * Very lightweight logging for the first time API usage. It's beneficial for - * tracking of individual functionality usage in larger applications. - * - * In order to ensure light-weightedness of logging, we utilize static variable - * trick - LogAPIUsage will be invoked only once and further invocations will - * just do an atomic check. - * - * Example: - * // Logs caller info with an arbitrary text event, if there is a usage. - * C10_LOG_API_USAGE_ONCE("my_api"); - */ -// #define C10_LOG_API_USAGE_ONCE(...) 
-// C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = -// ::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__); -// API usage logging capabilities -@Namespace("c10") public static native void SetAPIUsageLogger(@ByVal Logger logger); -@Namespace("c10") public static native void LogAPIUsage(@StdString BytePointer context); -@Namespace("c10") public static native void LogAPIUsage(@StdString String context); -// Targeting ../DDPLoggingData.java +// Targeting ../LongOptionalArrayRef.java +// Targeting ../LongVectorArrayRef.java -@Namespace("c10") public static native void SetPyTorchDDPUsageLogger( - @ByVal DataLogger logger); -@Namespace("c10") public static native void LogPyTorchDDPUsage(@Const @ByRef DDPLoggingData ddpData); -// Return value is needed to do the static variable initialization trick -@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString BytePointer context); -@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString String context); - // namespace detail -// Initializes the c10 logger. -@Namespace("c10") public static native void initLogging(); +// Targeting ../NamedValueArrayRef.java - // namespace c10 -// #endif // C10_UTIL_LOGGING_H_ +// Targeting ../SavedVariableArrayRef.java -// Parsed from c10/core/DeviceType.h +// Targeting ../ScalarArrayRef.java -// #pragma once -// This is directly synchronized with caffe2/proto/caffe2.proto, but -// doesn't require me to figure out how to get Protobuf headers into -// ATen/core (which would require a lot more build system hacking.) -// If you modify me, keep me synchronized with that file. +// Targeting ../ScalarTypeArrayRef.java -// #include -// #include -// #include +// Targeting ../ShortArrayRef.java -// These contains all device types that also have a BackendComponent -// and therefore participate in per-backend functionality dispatch keys. -// This is most backends except PrivateUse2 and PrivateUse3 -// #define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) -// _(CPU, extra) -// _(CUDA, extra) -// _(HIP, extra) -// _(XLA, extra) -// _(MPS, extra) -// _(IPU, extra) -// _(XPU, extra) -// _(HPU, extra) -// _(VE, extra) -// _(Lazy, extra) -// _(Meta, extra) -// _(MTIA, extra) -// _(PrivateUse1, extra) -@Namespace("c10") public enum DeviceType { - CPU((byte)(0)), - CUDA((byte)(1)), // CUDA. - MKLDNN((byte)(2)), // Reserved for explicit MKLDNN - OPENGL((byte)(3)), // OpenGL - OPENCL((byte)(4)), // OpenCL - IDEEP((byte)(5)), // IDEEP. 
- HIP((byte)(6)), // AMD HIP - FPGA((byte)(7)), // FPGA - ORT((byte)(8)), // ONNX Runtime / Microsoft - XLA((byte)(9)), // XLA / TPU - Vulkan((byte)(10)), // Vulkan - Metal((byte)(11)), // Metal - XPU((byte)(12)), // XPU - MPS((byte)(13)), // MPS - Meta((byte)(14)), // Meta (tensors with no data) - HPU((byte)(15)), // HPU / HABANA - VE((byte)(16)), // SX-Aurora / NEC - Lazy((byte)(17)), // Lazy Tensors - IPU((byte)(18)), // Graphcore IPU - MTIA((byte)(19)), // Meta training and inference devices - PrivateUse1((byte)(20)), // PrivateUse1 device - // NB: If you add more devices: - // - Change the implementations of DeviceTypeName and isValidDeviceType - // in DeviceType.cpp - // - Change the number below - COMPILE_TIME_MAX_DEVICE_TYPES((byte)(21)); +// Targeting ../SizeTArrayRef.java - public final byte value; - private DeviceType(byte v) { this.value = v; } - private DeviceType(DeviceType e) { this.value = e.value; } - public DeviceType intern() { for (DeviceType e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -@Namespace("c10") @MemberGetter public static native DeviceType kCPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kCUDA(); -@Namespace("c10") @MemberGetter public static native DeviceType kHIP(); -@Namespace("c10") @MemberGetter public static native DeviceType kFPGA(); -@Namespace("c10") @MemberGetter public static native DeviceType kORT(); -@Namespace("c10") @MemberGetter public static native DeviceType kXLA(); -@Namespace("c10") @MemberGetter public static native DeviceType kMPS(); -@Namespace("c10") @MemberGetter public static native DeviceType kMeta(); -@Namespace("c10") @MemberGetter public static native DeviceType kVulkan(); -@Namespace("c10") @MemberGetter public static native DeviceType kMetal(); -@Namespace("c10") @MemberGetter public static native DeviceType kXPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kHPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kVE(); -@Namespace("c10") @MemberGetter public static native DeviceType kLazy(); -@Namespace("c10") @MemberGetter public static native DeviceType kIPU(); -@Namespace("c10") @MemberGetter public static native DeviceType kMTIA(); -@Namespace("c10") @MemberGetter public static native DeviceType kPrivateUse1(); +// Targeting ../StrideArrayRef.java -// define explicit int constant -@Namespace("c10") @MemberGetter public static native int COMPILE_TIME_MAX_DEVICE_TYPES(); -@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d, @Cast("bool") boolean lower_case/*=false*/); -@Namespace("c10") public static native @StdString BytePointer DeviceTypeName(DeviceType d); -@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d, @Cast("bool") boolean lower_case/*=false*/); -@Namespace("c10") public static native @StdString String DeviceTypeName(@Cast("c10::DeviceType") byte d); +// Targeting ../StringArrayRef.java -@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(DeviceType d); -@Namespace("c10") public static native @Cast("bool") boolean isValidDeviceType(@Cast("c10::DeviceType") byte d); +// Targeting ../SymIntArrayRef.java -@Namespace("c10") public static native void register_privateuse1_backend(@StdString BytePointer backend_name); -@Namespace("c10") public static native void register_privateuse1_backend(@StdString String backend_name); -@Namespace("c10") public static native 
@StdString BytePointer get_privateuse1_backend(@Cast("bool") boolean lower_case/*=true*/); -@Namespace("c10") public static native @StdString BytePointer get_privateuse1_backend(); +// Targeting ../SymNodeArrayRef.java - // namespace c10 - // namespace std +// Targeting ../SymbolArrayRef.java -// Parsed from c10/core/Device.h +// Targeting ../TensorArrayRef.java -// #pragma once -// #include -// #include -// #include +// Targeting ../TensorArgArrayRef.java -// #include -// #include -// #include -// #include -/** An index representing a specific device; e.g., the 1 in GPU 1. - * A DeviceIndex is not independently meaningful without knowing - * the DeviceType it is associated; try to use Device rather than - * DeviceIndex directly. */ -// Targeting ../Device.java +// Targeting ../TensorIndexArrayRef.java +// Targeting ../TensorOptionalArrayRef.java +// Targeting ../TypeArrayRef.java -// Targeting ../DeviceHash.java +// Targeting ../ValueArrayRef.java - // namespace std +/** \name ArrayRef Convenience constructors + * \{ +

+ * Construct an ArrayRef from a single element. */ -// Parsed from c10/core/DeviceGuard.h +/** Construct an ArrayRef from a pointer and length. */ -// #pragma once +/** Construct an ArrayRef from a range. */ -// #include -// Targeting ../DeviceGuard.java +/** Construct an ArrayRef from a SmallVector. */ +/** Construct an ArrayRef from a SmallVector. */ -// Targeting ../OptionalDeviceGuard.java +/** Construct an ArrayRef from a std::vector. */ +/** Construct an ArrayRef from a std::array. */ +/** Construct an ArrayRef from an ArrayRef (no-op) (const) */ -// Note [Whither the DeviceGuard boilerplate] -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Design note: in principle, we could avoid these wrappers using: -// -// using DeviceGuard = impl::InlineDeviceGuard; -// using OptionalDeviceGuard = -// impl::InlineOptionalDeviceGuard; -// -// But the error messages are worse, and our users can't just look at the -// header file to find out what's going on. Furthermore, for specializations -// like CUDAStreamGuard, it can be profitable to replace some interfaces with -// refined types (e.g., return CUDAStream instead of Stream). So, we eat -// the boilerplate and write out the API explicitly. +/** Construct an ArrayRef from an ArrayRef (no-op) */ + +/** Construct an ArrayRef from a C array. */ +// WARNING: Template instantiation will NOT be willing to do an implicit +// conversions to get you to an c10::ArrayRef, which is why we need so +// many overloads. + +// This alias is deprecated because it doesn't make ownership +// semantics obvious. Use IntArrayRef instead! // namespace c10 -// Parsed from c10/core/DispatchKey.h +// Parsed from c10/core/MemoryFormat.h // #pragma once -// #include -// #include +// #include +// #include +// #include + // #include -// #include -// Semantically, each value of BackendComponent identifies a "backend" for our -// dispatch. Some functionalities that we may dispatch to are allowed to -// register different handlers for each backend. The BackendComponent is then -// used to figure out which backend implementation to dispatch to. +// Memory format is not the property of a Tensor. It is the way to tell an +// operator how the result should be organized in memory and nothing more. That +// means memory format should never be used as return value for any tensor state +// interrogation functions (internally and externally). +// +// Possible options are: +// Preserve: +// If any of the input tensors is in channels_last format, operator output +// should be in channels_last format +// +// Contiguous: +// Regardless of input tensors format, the output should be contiguous +// Tensor. +// +// ChannelsLast: +// Regardless of input tensors format, the output should be in channels_last +// format. +@Namespace("c10") public enum MemoryFormat { + Contiguous((byte)(0)), + Preserve((byte)(1)), + ChannelsLast((byte)(2)), + ChannelsLast3d((byte)(3)), + NumOptions((byte)(4)); -// In implementation terms, the backend component identifies a specific "bit" in -// a DispatchKeySet. The bits in the DispatchKeySet are split between the bottom -// ~12 "BackendComponent" bits, while the remaining upper bits are assigned to -// functionalities. When we encounter a functionality bit that is known to be -// customizeable per-backend, then we also look at the lower BackendComponent -// bits and take the highest bit to determine which backend's implementation to -// use. 
+ public final byte value; + private MemoryFormat(byte v) { this.value = v; } + private MemoryFormat(MemoryFormat e) { this.value = e.value; } + public MemoryFormat intern() { for (MemoryFormat e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} -// WARNING! If you add a new backend component to the end of this list, -// make sure you update PrivateUse3Bit. (But you shouldn't: private use -// keys should have higher precedence than all built-in keys) +// If you are seeing this, it means that this call site was not checked if +// the memory format could be preserved, and it was switched to old default +// behaviour of contiguous +// #define LEGACY_CONTIGUOUS_MEMORY_FORMAT c10::get_contiguous_memory_format() -// If you add a new (non-privateuse) backend here, -// make sure to add an Autograd fallthrough kernel -// in aten/src/ATen/core/VariableFallbackKernel.cpp +@Namespace("c10") public static native MemoryFormat get_contiguous_memory_format(); -// #define C10_FORALL_BACKEND_COMPONENTS(_, extra) -// _(CPU, extra) -// _(CUDA, extra) -// _(HIP, extra) -// _(XLA, extra) -// _(MPS, extra) -// _(IPU, extra) -// _(XPU, extra) -// _(HPU, extra) -// _(VE, extra) -// _(Lazy, extra) -// _(Meta, extra) -// _(MTIA, extra) -// _(PrivateUse1, extra) -// _(PrivateUse2, extra) -// _(PrivateUse3, extra) +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + @ByVal MemoryFormat memory_format); -// WARNING! If we add a new per-backend functionality key that has higher -// priority than Autograd, then make sure you update EndOfRuntimeBackendKeys +// Note: Hardcoded the channel last stride indices here to get better +// performance -// #define C10_FORALL_FUNCTIONALITY_KEYS(_) -// _(Dense, ) -// _(Quantized, Quantized) -// _(Sparse, Sparse) -// _(NestedTensor, NestedTensor) -// _(AutogradFunctionality, Autograd) +@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_2d(@ByVal LongArrayRef sizes); +@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_2d(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); -@Namespace("c10") public enum BackendComponent { +@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_3d(@ByVal LongArrayRef sizes); +@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_3d(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); - // A "backend" is colloquially used to refer to handlers for dispatch - // which actually implement the numerics of an operation in question. - // - // Due to the nature of the enum, these backends are specified in - // an ordered way, but for most backends this order is not semantically - // meaningful (e.g., it's valid to reorder these backends without changing - // semantics). The only situation when backend ordering is meaningful - // is when the backend participates in multiple dispatch with another - // backend; e.g., CPU and CUDA (cuda must have higher priority). +// NOTE: +// Below are Helper functions for is_channels_last_strides_xd. +// 1. 
Please do not combine these helper functions; each helper function handles
+// exactly one case of sizes + memory_format. By doing this, the stride indices
+// will be a constant array and we can access them using constant index numbers,
+// and the compiler will fully unroll the loop on stride indices to gain better
+// performance.
+// 2. No error checking in the helper functions; the caller ensures the
+// correctness of the input.
+// 3. All helper functions have similar comments; only the 1st helper function is
+// commented here.
- // These keys don't correspond to individual kernels.
- // Instead, they represent the backends that are allowed to override specific
- // pieces of functionality:
- // - dense kernels (e.g. DispatchKey::CPU)
- // - sparse kernels (e.g. DispatchKey::SparseCPU)
- // - quantized kernels (e.g. DispatchKey::QuantizedCPU)
- // - autograd kernels (e.g. DispatchKey::AutogradCPU)
- // We reserve space in the runtime operator table for this full cross product
- // of
- // [backends in this enum] x [keys below that are explicitly marked as having
- // per-backend functionality]
- //
- // A meta tensor is a tensor without any data associated with it. (They
- // have also colloquially been referred to as tensors on the "null" device).
- // A meta tensor can be used to dry run operators without actually doing any
- // computation, e.g., add on two meta tensors would give you another meta
- // tensor with the output shape and dtype, but wouldn't actually add anything.
+// Note [Ambiguous is_channels_last_strides_xd]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// The flaw of carrying memory_format implicitly through strides is very hard
+// to work around properly. See issue #24090.
+// Without the history of permutation, we can't infer the memory_format of a
+// tensor from the snapshot of its size & stride.
+// e.g.
+//
+// 1. We can NOT specify the memory_format of an N111 tensor through strides in
+// a meaningful way;
+//
+// 2. Two paths can end up with identical size/stride:
+// an N11W contiguous tensor sliced at the w-dimension becomes [N,1,1,1]@[W,W,W,W]
+// an NC11 channels_last tensor sliced at the c-dimension becomes [N,1,1,1]@[C,C,C,C]
+// So if we see a tensor [N,1,1,1]@[X,X,X,X], there's no way for us to infer
+// the memory_format of the original tensor.
+//
+// Due to these limitations, our temporary workaround `is_channels_last_strides`
+// makes a best effort to infer whether the original memory_format of a tensor is
+// at::MemoryFormat::ChannelsLast. The two objectives of this function (ordered
+// by their importance):
+// 1. Ensure that normal shape manipulation does not accidentally change the
+// MemoryFormat of an existing tensor.
+// 2. Allow users to mark MemoryFormat::ChannelsLast on tensors;
+//
+// The function does so by checking strides of the tensor, including strides of
+// size-1 dimensions, although conventionally PyTorch implies no restriction on
+// trivial strides (the stride of a size-1 dimension).
+//
+// Note that this approach is a compromise. We did not solve the problem
+// completely. In many cases we will not be able to infer the correct memory
+// format.
+// The implementation of `is_channels_last_strides` serves these objectives:
+// MemoryFormat::ChannelsLast has to be explicitly opted in (no accidental
+// conversion); best effort to maintain the ChannelsLast flag.
+//
+// Because this is not a bulletproof solution, through testing
+// (aten/src/ATen/test/memory_format_test.cpp)
+// a. we ensure that the common tasks are supported;
+// b. we identify corner cases where the implementation compromises.
+//
+// By the time accumulated permutation is enabled to replace implicit
+// memory_format through strides, we should update our tests and fix the
+// issues they uncover.
+//
+// We use Channels Last 2d as an example above.
+// This is a general problem for all the is_channels_last_strides_xd
+// implementations. Please check the helper functions
+// (is_channels_last_strides_*d_s*) for more details.
- InvalidBit((byte)(0)),
- CPUBit((byte)(1)),
- CUDABit((byte)(2)),
- HIPBit((byte)(3)),
- XLABit((byte)(4)),
- MPSBit((byte)(5)),
- IPUBit((byte)(6)),
- XPUBit((byte)(7)),
- HPUBit((byte)(8)),
- VEBit((byte)(9)),
- LazyBit((byte)(10)),
- MetaBit((byte)(11)),
- MTIABit((byte)(12)),
- PrivateUse1Bit((byte)(13)),
- PrivateUse2Bit((byte)(14)),
- PrivateUse3Bit((byte)(15)),
+@Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_2d(
+ @Const @ByVal LongArrayRef sizes,
+ @Const @ByVal LongArrayRef strides);
+@Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_2d(
+ @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes,
+ @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides);
- // Define an alias to represent end of backend dispatch keys.
- // If you add new backend keys after PrivateUse3, please also update it here.
- EndOfBackendKeys((byte)(PrivateUse3Bit.value));
+@Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_3d(
+ @Const @ByVal LongArrayRef sizes,
+ @Const @ByVal LongArrayRef strides);
+@Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_3d(
+ @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes,
+ @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides);
+
+ // namespace c10
+
+
+// Parsed from c10/core/QScheme.h
+
+// #pragma once
+
+// #include
+// #include
+
+/**
+ * QScheme is an enum that specifies the type of quantization. This has a
+ * one-to-one correspondence with Quantizer.
+ * Please refer to ATen/quantized/Quantizer.h to see the Quantizer classes.
+ * Keep this file in sync with torch/nn/_qscheme.py
+ */
+@Namespace("c10") public enum QScheme {
+ PER_TENSOR_AFFINE((byte)(0)),
+ PER_CHANNEL_AFFINE((byte)(1)),
+ PER_TENSOR_SYMMETRIC((byte)(2)),
+ PER_CHANNEL_SYMMETRIC((byte)(3)),
+ PER_CHANNEL_AFFINE_FLOAT_QPARAMS((byte)(4)),
+ COMPILE_TIME_NUM_QSCHEMES((byte)(5));

 public final byte value;
- private BackendComponent(byte v) { this.value = v; }
- private BackendComponent(BackendComponent e) { this.value = e.value; }
- public BackendComponent intern() { for (BackendComponent e : values()) if (e.value == value) return e; return this; }
+ private QScheme(byte v) { this.value = v; }
+ private QScheme(QScheme e) { this.value = e.value; }
+ public QScheme intern() { for (QScheme e : values()) if (e.value == value) return e; return this; }
 @Override public String toString() { return intern().name(); }
 }
+@Namespace("c10") @MemberGetter public static native int COMPILE_TIME_NUM_QSCHEMES();
-// Semantically, a dispatch key identifies a possible "level" in our
-// dispatch, for which a handler may be registered. Each handler corresponds
-// to a type of functionality.
-//
-// In implementation terms, the dispatch key identifies a specific "bit" in a
-// DispatchKeySet.
Higher bit indexes get handled by dispatching first (because -// we "count leading zeros" when we extract the highest priority dispatch -// key.) -// -// Note [DispatchKey Classification] -// This enum actually contains several types of keys, which are explained -// in more detail further down: -// (1) non-customizable backends (e.g. FPGA) -// (2) non-customizable functionalities (e.g. Functionalize) -// (3) functionalized that are customizable per backend (e.g. Dense, Sparse, -// AutogradFunctionality) (4) per-backend instances of customizable -// functionalities (e.g. CPU, SparseCPU, AutogradCPU) (5) alias keys (e.g. -// CompositeImplicitAutograd) -// -// Of the categories above, it's important to note: -// (a) which keys are assigned individual bits in a DispatchKeySet -// (b) which keys are assigned individual slots in the runtime operator table -// ("Runtime keys") -// -// (1), (2) and (3) all get their own dedicated bits in the DispatchKeySet. -// (1), (2) and (4) all get their own dedicated slots in the runtime operator -// table. +@Namespace("c10") public static native @StdString BytePointer toString(QScheme qscheme); -// See Note [DispatchKeySet Internal Representation] for more details. -// -// NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py -@Namespace("c10") public enum DispatchKey { + // namespace c10 - // ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // - // This is not a "real" functionality, but it exists to give us a "nullopt" - // element we can return for cases when a DispatchKeySet contains no elements. - // You can think a more semantically accurate definition of DispatchKey is: - // - // using DispatchKey = optional - // - // and Undefined == nullopt. We didn't actually represent - // it this way because optional would take two - // words, when DispatchKey fits in eight bits. - Undefined((short)(0)), +// Parsed from c10/core/Stream.h - // Define an alias for Undefined to represent CatchAll (long term - // this will get eliminated, but for now it's convenient) - CatchAll((short)(Undefined.value)), +// #pragma once - // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Functionality Keys ~~~~~~~~~~~~~~~~~~~~~~ // - // Every value in the enum (up to EndOfFunctionalityKeys) - // corresponds to an individual "functionality" that can be dispatched to. - // This is represented in the DispatchKeySet by assigning each of these enum - // values - // to each of the remaining (64 - len(BackendComponent)) bits. - // - // Most of these functionalities have a single handler assigned to them, - // making them "runtime keys". - // That map to a single slot in the runtime operator table. - // - // A few functionalities are allowed to be customizable per backend. - // See [Note: Per-Backend Functionality Dispatch Keys] for details. +// #include - // See [Note: Per-Backend Functionality Dispatch Keys] - Dense((short)(Undefined.value + 1)), +/** An index representing a specific stream. A StreamId is not independently + * meaningful without knowing the Device it is associated with; try to + * use Stream rather than StreamId directly. + * + * StreamIds are opaque; they are assigned by some DeviceType-specific + * numbering system which is not visible to the user. HOWEVER, we + * guarantee that StreamId 0 is always a valid stream, and corresponds + * to some sort of "default" stream. */ +// Targeting ../StreamData3.java - // Below are non-extensible backends. 
- // These are backends that currently don't have their own overrides for - // Autograd/Sparse/Quantized kernels, - // and we therefore don't waste space in the runtime operator table allocating - // space for them. - // If any of these backends ever need to customize, e.g., Autograd, then we'll - // need to add a DispatchKey::*Bit for them. - // TODO: put this in BackendComponents - FPGA((short)(Undefined.value + 2)), // Xilinx support lives out of tree at - // https://gitlab.com/pytorch-complex/vitis_kernels +// Targeting ../Stream.java - // TODO: put this in BackendComponents - // ONNX Runtime, lives out of tree at https://github.com/pytorch/ort and - // https://github.com/microsoft/onnxruntime, and is also used to test general - // backend/extension machinery in the core. cf: - // - test/cpp_extensions/ort_extension.cpp - // - test/test_torch.py - // - aten/src/ATen/test/extension_backend_test.cpp - ORT((short)(Undefined.value + 3)), - Vulkan((short)(Undefined.value + 4)), // TODO: put this in BackendComponents - Metal((short)(Undefined.value + 5)), // TODO: put this in BackendComponents - // See [Note: Per-Backend Functionality Dispatch Keys] - Quantized((short)(Undefined.value + 6)), +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef Stream s); - // This backend is to support custom RNGs; it lets you go - // to a different kernel if you pass in a generator that is not a - // traditional CPUGeneratorImpl/CUDAGeneratorImpl. To make use of this - // key: - // 1) set it as a second parameter of at::Generator constructor call in - // the user-defined PRNG class. - // 2) use it as a dispatch key while registering custom kernels - // (templatized kernels specialized for user-defined PRNG class) - // intended for out of tree use; tested by aten/src/ATen/test/rng_test.cpp - CustomRNGKeyId((short)(Undefined.value + 7)), + // namespace c10 + // namespace std - // TODO: Make Mkldnn a functionality key, so we can give it Meta - // support - // Here are backends which specify more specialized operators - // based on the layout of the tensor. Note that the sparse backends - // are one case where ordering matters: sparse multi-dispatches with - // the corresponding dense tensors, and must be handled before them. - MkldnnCPU((short)(Undefined.value + 8)), // registered at build/aten/src/ATen/RegisterMkldnnCPU.cpp - // NB: not to be confused with MKLDNN, which is Caffe2 only - // See [Note: Per-Backend Functionality Dispatch Keys] - Sparse((short)(Undefined.value + 9)), +// Parsed from c10/core/OptionalRef.h - // TODO: Make SparseCsr a functionality key - SparseCsrCPU((short)(Undefined.value + 10)), - SparseCsrCUDA((short)(Undefined.value + 11)), +// #pragma once - NestedTensor((short)(Undefined.value + 12)), + // namespace c10 - // In some situations, it is not immediately obvious what the correct - // backend for function is, because the function in question doesn't - // have any "tensor" arguments. In this case, a BackendSelect function - // can be registered to implement the custom determination of the - // correct backend. - BackendSelect((short)(Undefined.value + 13)), - Python((short)(Undefined.value + 14)), +// Parsed from c10/util/BFloat16.h - // Out-of-core key for Fake Tensor in torchdistx. 
- // See https://pytorch.org/torchdistx/latest/fake_tensor.html - // TODO: delete this in favor of Python-implemented fake tensor - Fake((short)(Undefined.value + 15)), - // See Note [Out-of-tree vmap+grad prototype]. The purpose of this key - // is to insert code after the "autograd subsystem" runs, so this key should - // be directly after ADInplaceOrView and all of the autograd keys. - FuncTorchDynamicLayerBackMode((short)(Undefined.value + 16)), +// #pragma once - // Alias and mutation removal. - // If some backends want to opt into only alias removal or only mutation - // removal, - // we can consider adding separate keys dedicated to those individual passes. - // See Note [Functionalization Pass In Core] for details. - Functionalize((short)(Undefined.value + 17)), +// Defines the bfloat16 type (brain floating-point). This representation uses +// 1 bit for the sign, 8 bits for the exponent and 7 bits for the mantissa. - // The named dispatch key is set for any tensors with named dimensions. - // Although we have a dispatch key for named tensors, for historical reasons, - // this dispatch key doesn't do any of the substantive functionality for named - // tensor (though, hypothetically, it could!) At the moment, it's just - // responsible for letting us give good error messages when operations - // don't support named tensors. - // - // NB: If you ever consider moving named tensor functionality into - // this dispatch key, note that it might be necessary add another dispatch - // key that triggers before composite operators, in case a composite operator - // has named dimension propagation that doesn't match that of its - // constituent parts. - // TODO: delete this once torchdim lands in functorch - Named((short)(Undefined.value + 18)), +// #include +// #include +// #include - // The Conjugate dispatch key is set for any tensors that need to perform - // conjugation - // This is implemented at a dispatch level right before any backends run - Conjugate((short)(Undefined.value + 19)), +// #if defined(__CUDACC__) && !defined(USE_ROCM) +// #endif - // The Negative dispatch key is set for any tensors that need to perform - // negation - // This is implemented at a dispatch level right before any backends run - Negative((short)(Undefined.value + 20)), +// #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) +// #endif +@Namespace("c10::detail") public static native float f32_from_bits(@Cast("uint16_t") short src); +@Namespace("c10::detail") public static native @Cast("uint16_t") short bits_from_f32(float src); - ZeroTensor((short)(Undefined.value + 21)), // registered at build/aten/src/ATen/RegisterZeroTensor.cpp - // Note [ADInplaceOrView key] - // ADInplaceOrView key is used by inplace or view ops to register a kernel - // that does additional setup for future autograd computation. - // - // 1. For inplace ops this kernel does version bump - // 2. For view ops this kernel does `as_view` setup where we properly setup - // DifferentiableViewMeta on the view tensors. - // - // For other ops it's fallthrough kernel since there's no extra - // work to do. - // - // Note [Dream: skip VariableType kernel when requires_grad=false] - // - // In an ideal world where we can skip VariableType kernel for inputs - // with requires_grad=false, instead of a fallthrough kernel, we'll - // register a kernel shown below to all functional ops as well: - // torch::Tensor my_functional_op(...)
{ - // { - // // Note for every op in VariableType, you need to go through - // // `AutoDispatchBelowADInplaceOrView` guard exactly once to add the - // // key to TLS excluded set. If you don't go through it at all, - // // inplace/view ops called through `at::` inside your backend - // // kernel will dispatch to ADInplaceOrView kernels and do a lot - // // of extra work. - // at::AutoDispatchBelowADInplaceOrView guard; - // at::redispatch::my_functional_op(...); - // } - // } - // But this work is currently blocked since it adds an extra dispatch - // for all ops and it's non-trivial overhead at model level(a few percents). - // Thus our current approach takes advantage of the fact every kernel go - // through VariableType kernel first and pulls the - // `at::AutoDispatchBelowADInplaceOrView` guard of functional ops - // up to the `VariableType` kernel. Thus we only add the extra dispatch - // to view/inplace ops to minimize its perf impact to real models. - ADInplaceOrView((short)(Undefined.value + 22)), - // Note [Alias Dispatch Key : Autograd] - // All backends are oblivious to autograd; autograd is handled as a - // layer which happens on top of all backends. It inspects the autograd - // metadata of all inputs, determines what autograd metadata should be - // constructed by the output, and otherwise defers to the backend to - // actually do the numeric computation. Autograd contains - // the bulk of this logic. +@Namespace("c10::detail") public static native @Cast("uint16_t") short round_to_nearest_even(float src); - // Autograd is now an alias dispatch key which by default maps to all - // backend-specific autograd keys. - // Backend-specific allow backends to override the default kernel registered - // to Autograd key as needed. - // For example, XLA wants to define autograd for einsum directly. - // Registering a custom autograd implementation at the XLA key won't work - // because we process Autograd before XLA. This key has higher priority and - // gets processed first. You generally should NOT redispatch after handling - // autograd here (since that would result in execution of the Autograd - // operator, which you're trying to skip). In AutogradXLA implementations, - // you are responsible for handling autograd yourself, or deferring to other - // operators which support autograd. +// Targeting ../BFloat16.java - // Currently we only have backend-specific autograd keys for CPU/CUDA/XLA and - // reserved user-defined backends. All other in-tree backends share the - // AutogradOther key. We can add specific autograd key for those backends - // upon request. - AutogradOther((short)(Undefined.value + 23)), - // See [Note: Per-Backend Functionality Dispatch Keys] - AutogradFunctionality((short)(Undefined.value + 24)), - // NestedTensor is an example of something that isn't a "real backend" - // (because it mostly consists of redispatching kernels) - // but it would like to override autograd functionality in C++. - // We can handle cases like this by adding an extra functionality key - // exclusively for handling autograd for NestedTensor. - // lives out of tree at - // https://github.com/pytorch/nestedtensor - AutogradNestedTensor((short)(Undefined.value + 25)), + // namespace c10 - Tracer((short)(Undefined.value + 26)), +// #include // IWYU pragma: keep - // TODO: make Autocast a functionality key - // Autocasting precedes VariableTypeId, to ensure casts are autograd-exposed - // and inputs are saved for backward in the post-autocast type. 
- AutocastCPU((short)(Undefined.value + 27)), - AutocastXPU((short)(Undefined.value + 28)), - AutocastHPU((short)(Undefined.value + 29)), - // Naughtily, AutocastCUDA is also being used for XLA. In the terminal state, - // it probably should get its own Autocast key - AutocastCUDA((short)(Undefined.value + 30)), - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // - // There are a number of alternative modes which may want to handle before - // autograd; for example, error checking, tracing, profiling or vmap. They - // go here. +// Parsed from c10/util/BFloat16-inl.h - FuncTorchBatched((short)(Undefined.value + 31)), // See Note [Out-of-tree vmap+grad prototype] - FuncTorchVmapMode((short)(Undefined.value + 32)), // See Note [Out-of-tree vmap+grad prototype] +// #pragma once - // This is the dispatch key for BatchedTensorImpl, which is used to implement - // batching rules for vmap. - Batched((short)(Undefined.value + 33)), +// #include +// #include - // When we are inside a vmap, all tensors dispatch on this key. - // See Note: [DispatchKey::VmapMode usage] for more details. - VmapMode((short)(Undefined.value + 34)), +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif - FuncTorchGradWrapper((short)(Undefined.value + 35)), // See Note [Out-of-tree vmap+grad prototype] +// #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) +// #endif - // Out-of-core key for Deferred Module Initialization in torchdistx. - // See https://pytorch.org/torchdistx/latest/deferred_init.html - DeferredInit((short)(Undefined.value + 36)), +/** Constructors */ - // Used by Python key logic to know the set of tls on entry to the dispatcher - // This kernel assumes it is the top-most non-functorch-related DispatchKey. - // If you add a key above, make sure to update the fallback implementation for - // this. - PythonTLSSnapshot((short)(Undefined.value + 37)), - // This key should be at the very top of the dispatcher - FuncTorchDynamicLayerFrontMode((short)(Undefined.value + 38)), // See Note [Out-of-tree vmap+grad prototype] +/** Implicit conversions */ - // TESTING: This is intended to be a generic testing tensor type id. - // Don't use it for anything real; its only acceptable use is within a single - // process test. Use it by creating a TensorImpl with this DispatchKey, and - // then registering operators to operate on this type id. See - // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example. - TESTING_ONLY_GenericWrapper((short)(Undefined.value + 39)), - // TESTING: This is intended to be a generic testing tensor type id. - // Don't use it for anything real; its only acceptable use is within a ingle - // process test. Use it by toggling the mode on and off via - // TESTING_ONLY_tls_generic_mode_set_enabled and then registering operators - // to operate on this type id. 
See - // aten/src/ATen/core/dispatch/backend_fallback_test.cpp - // for a usage example - TESTING_ONLY_GenericMode((short)(Undefined.value + 40)), +// #if defined(__CUDACC__) && !defined(USE_ROCM) +// #endif - // This is a bypass that allows you to skip running the C++ dispatcher - // entirely - PythonDispatcher((short)(Undefined.value + 41)), +// #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) +// #endif - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // - EndOfFunctionalityKeys((short)(Undefined.value + 42)), +// CUDA intrinsics - StartOfDenseBackends((short)(Undefined.value + 43)), - CPU((short)(Undefined.value + 44)), - - CUDA((short)(Undefined.value + 45)), - - HIP((short)(Undefined.value + 46)), - - XLA((short)(Undefined.value + 47)), - - MPS((short)(Undefined.value + 48)), - - IPU((short)(Undefined.value + 49)), - - XPU((short)(Undefined.value + 50)), - - HPU((short)(Undefined.value + 51)), - - VE((short)(Undefined.value + 52)), - - Lazy((short)(Undefined.value + 53)), - - Meta((short)(Undefined.value + 54)), - - MTIA((short)(Undefined.value + 55)), - - PrivateUse1((short)(Undefined.value + 56)), - - PrivateUse2((short)(Undefined.value + 57)), - - PrivateUse3((short)(Undefined.value + 58)), - EndOfDenseBackends((short)(0)), - StartOfQuantizedBackends((short)(1)), - QuantizedCPU((short)(2)), - - QuantizedCUDA((short)(3)), - - QuantizedHIP((short)(4)), - - QuantizedXLA((short)(5)), - - QuantizedMPS((short)(6)), - - QuantizedIPU((short)(7)), - - QuantizedXPU((short)(8)), - - QuantizedHPU((short)(9)), - - QuantizedVE((short)(10)), - - QuantizedLazy((short)(11)), - - QuantizedMeta((short)(12)), - - QuantizedMTIA((short)(13)), - - QuantizedPrivateUse1((short)(14)), - - QuantizedPrivateUse2((short)(15)), - - QuantizedPrivateUse3((short)(16)), - EndOfQuantizedBackends((short)( QuantizedPrivateUse3.value)), - StartOfSparseBackends((short)( QuantizedPrivateUse3.value + 1)), - SparseCPU((short)( QuantizedPrivateUse3.value + 2)), - - SparseCUDA((short)( QuantizedPrivateUse3.value + 3)), - - SparseHIP((short)( QuantizedPrivateUse3.value + 4)), - - SparseXLA((short)( QuantizedPrivateUse3.value + 5)), - - SparseMPS((short)( QuantizedPrivateUse3.value + 6)), - - SparseIPU((short)( QuantizedPrivateUse3.value + 7)), - - SparseXPU((short)( QuantizedPrivateUse3.value + 8)), - - SparseHPU((short)( QuantizedPrivateUse3.value + 9)), - - SparseVE((short)( QuantizedPrivateUse3.value + 10)), - - SparseLazy((short)( QuantizedPrivateUse3.value + 11)), - - SparseMeta((short)( QuantizedPrivateUse3.value + 12)), - - SparseMTIA((short)( QuantizedPrivateUse3.value + 13)), - - SparsePrivateUse1((short)( QuantizedPrivateUse3.value + 14)), - - SparsePrivateUse2((short)( QuantizedPrivateUse3.value + 15)), - - SparsePrivateUse3((short)( QuantizedPrivateUse3.value + 16)), - EndOfSparseBackends((short)( SparsePrivateUse3.value)), - StartOfNestedTensorBackends((short)( SparsePrivateUse3.value + 1)), - NestedTensorCPU((short)( SparsePrivateUse3.value + 2)), - - NestedTensorCUDA((short)( SparsePrivateUse3.value + 3)), - - NestedTensorHIP((short)( SparsePrivateUse3.value + 4)), - - NestedTensorXLA((short)( SparsePrivateUse3.value + 5)), - - NestedTensorMPS((short)( SparsePrivateUse3.value + 6)), - - NestedTensorIPU((short)( SparsePrivateUse3.value + 7)), - - NestedTensorXPU((short)( SparsePrivateUse3.value + 8)), - - NestedTensorHPU((short)( SparsePrivateUse3.value + 9)), - - NestedTensorVE((short)( SparsePrivateUse3.value + 10)), - - NestedTensorLazy((short)( SparsePrivateUse3.value + 11)), - 
- NestedTensorMeta((short)( SparsePrivateUse3.value + 12)), - - NestedTensorMTIA((short)( SparsePrivateUse3.value + 13)), - - NestedTensorPrivateUse1((short)( SparsePrivateUse3.value + 14)), - - NestedTensorPrivateUse2((short)( SparsePrivateUse3.value + 15)), - - NestedTensorPrivateUse3((short)( SparsePrivateUse3.value + 16)), - EndOfNestedTensorBackends((short)( NestedTensorPrivateUse3.value)), - StartOfAutogradFunctionalityBackends((short)( NestedTensorPrivateUse3.value + 1)), - AutogradCPU((short)( NestedTensorPrivateUse3.value + 2)), - - AutogradCUDA((short)( NestedTensorPrivateUse3.value + 3)), - - AutogradHIP((short)( NestedTensorPrivateUse3.value + 4)), - - AutogradXLA((short)( NestedTensorPrivateUse3.value + 5)), - - AutogradMPS((short)( NestedTensorPrivateUse3.value + 6)), - - AutogradIPU((short)( NestedTensorPrivateUse3.value + 7)), - - AutogradXPU((short)( NestedTensorPrivateUse3.value + 8)), - - AutogradHPU((short)( NestedTensorPrivateUse3.value + 9)), - - AutogradVE((short)( NestedTensorPrivateUse3.value + 10)), - - AutogradLazy((short)( NestedTensorPrivateUse3.value + 11)), - - AutogradMeta((short)( NestedTensorPrivateUse3.value + 12)), - - AutogradMTIA((short)( NestedTensorPrivateUse3.value + 13)), - - AutogradPrivateUse1((short)( NestedTensorPrivateUse3.value + 14)), - - AutogradPrivateUse2((short)( NestedTensorPrivateUse3.value + 15)), - - AutogradPrivateUse3((short)( NestedTensorPrivateUse3.value + 16)), - EndOfAutogradFunctionalityBackends((short)( AutogradPrivateUse3.value)), +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif - EndOfRuntimeBackendKeys((short)(EndOfAutogradFunctionalityBackends.value)), +/** Arithmetic */ - // ~~~~~~~~~~~~~~~~~~~~~~ Alias Dispatch Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~ // - // Note [Alias Dispatch Keys] - // Alias dispatch keys are synthetic dispatch keys which map to multiple - // runtime dispatch keys. Alisa keys have precedence, but they are always - // lower precedence than runtime keys. You can register a kernel to an - // alias key, the kernel might be populated to the mapped runtime keys - // during dispatch table computation. - // If a runtime dispatch key has multiple kernels from alias keys, which - // kernel wins is done based on the precedence of alias keys (but runtime - // keys always have precedence over alias keys). - // Alias keys won't be directly called during runtime. 
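+// As a quick orientation for preset users, here is a minimal, hedged usage
+// sketch for the BFloat16 operator bindings declared just below. It assumes
+// these statics live in org.bytedeco.pytorch.global.torch like the rest of
+// the presets, and that BFloat16 exposes the float constructor implied by
+// the "Constructors" section above; the variable names are illustrative:
+//
+//   import org.bytedeco.pytorch.BFloat16;
+//   import static org.bytedeco.pytorch.global.torch.*;
+//
+//   BFloat16 a = new BFloat16(1.5f);  // assumed float constructor
+//   BFloat16 b = new BFloat16(2.0f);
+//   BFloat16 sum = add(a, b);         // binds c10::operator+(BFloat16, BFloat16)
+//   addPut(sum, b);                   // binds c10::operator+=, mutates sum in place
+//   boolean less = lessThan(a, b);    // binds c10::operator<, declared further below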
+@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); - // See Note [Alias Dispatch Key : Autograd] - Autograd((short)(EndOfAutogradFunctionalityBackends.value + 1)), - CompositeImplicitAutograd((short)(EndOfAutogradFunctionalityBackends.value + 2)), // registered at - // build/aten/src/ATen/RegisterCompositeImplicitAutograd.cpp +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); - // Note: The alias keyset for FuncTorchBatchedDecomposition is disjoint from - // all - // other alias keysets - // and so precedence order doesn't matter - FuncTorchBatchedDecomposition((short)(EndOfAutogradFunctionalityBackends.value + 3)), // registered at - // build/aten/src/ATen/RegisterFuncTorchBatchedDecomposition.cpp - // Note: The alias keyset for CompositeImplicitAutogradNestedTensor is - // disjoint from all other alias keysets - CompositeImplicitAutogradNestedTensor((short)(EndOfAutogradFunctionalityBackends.value + 4)), // registered at - // build/aten/src/ATen/RegisterCompositeImplicitAutogradNestedTensor.cpp - CompositeExplicitAutograd((short)(EndOfAutogradFunctionalityBackends.value + 5)), // registered at - // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp - // See Note [CompositeExplicitAutogradNonFunctional Key] - CompositeExplicitAutogradNonFunctional((short)(EndOfAutogradFunctionalityBackends.value + 6)), // registered at - // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp +@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); - // Define an alias key to represent end of alias dispatch keys. - // If you add new alias keys after Autograd, please also update it here. - StartOfAliasKeys((short)(Autograd.value)), - EndOfAliasKeys((short)(CompositeExplicitAutogradNonFunctional.value)), // +@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@Const @ByRef BFloat16 a, @Const @ByRef BFloat16 b); - // ~~~~~~~~~~~~~~~~~~~~~~~~~ BC ALIASES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // - // The aliases exist for backwards compatibility reasons, they shouldn't - // be used - CPUTensorId((short)(CPU.value)), - CUDATensorId((short)(CUDA.value)), - DefaultBackend((short)(CompositeExplicitAutograd.value)), - PrivateUse1_PreAutograd((short)(AutogradPrivateUse1.value)), - PrivateUse2_PreAutograd((short)(AutogradPrivateUse2.value)), - PrivateUse3_PreAutograd((short)(AutogradPrivateUse3.value)), - Autocast((short)(AutocastCUDA.value)); +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@Const @ByRef BFloat16 a); - public final short value; - private DispatchKey(short v) { this.value = v; } - private DispatchKey(DispatchKey e) { this.value = e.value; } - public DispatchKey intern() { for (DispatchKey e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} +@Namespace("c10") public static native @ByRef @Name("operator +=") BFloat16 addPut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); -// Note [Private use DispatchKey] -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Private use tensor IDs are preallocated tensor type IDs for use in user -// applications. 
Similar to private use fields in HTTP, they can be used -// by end users for experimental or private applications, without needing -// to "standardize" the tensor ID (which would be done by submitting a PR -// to PyTorch to add your type ID). -// -// Private use tensor IDs are appropriate to use if you want to experiment -// with adding a new tensor type (without having to patch PyTorch first) or -// have a private, non-distributed application that needs to make use of a -// new tensor type. Private use tensor IDs are NOT appropriate to use for -// libraries intended to be distributed to further users: please contact -// the PyTorch developers to get a type ID registered in this case. -// -// We provide two classes of private user tensor id: regular DispatchKeys -// and Autograd DispatchKeys. DispatchKeys serve the role of ordinary "backend" -// DispatchKeys; if you were adding support for a new type of accelerator, you -// would use a backend DispatchKey, and ideally automatically reuse -// AutogradOther definitions already defined in PyTorch. AutogradPrivateUse -// DispatchKeys serve as "wrapper" DispatchKeys: they are only necessary for -// tensors that compose multiple internal tensors, and for cases when the -// built-in autograd formulas for operators are not appropriate. +@Namespace("c10") public static native @ByRef @Name("operator -=") BFloat16 subtractPut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); -// Check if a DispatchKey is an alias mapping to other runtime keys. -@Namespace("c10") public static native @Cast("const bool") boolean isAliasDispatchKey(DispatchKey k); -@Namespace("c10") public static native @Cast("const bool") boolean isAliasDispatchKey(@Cast("c10::DispatchKey") short k); +@Namespace("c10") public static native @ByRef @Name("operator *=") BFloat16 multiplyPut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); -// [Note: Per-Backend Functionality Dispatch Keys] -// Check if a DispatchKey is a per-backend functionality key -// Any functionalities that can be customized per-backend should be added here. -// These keys correspond to functionalities that can be customized indivually -// per backend. While they only take up one bit in the `DispatchKeySet` bitset, -// they map to (# backends) slots in the operator table. -// Each of these keys also has a separate set of "runtime keys" in the dispatch -// key enum, per backend, which *do* map to the individual operator table slots. -// For example, the "Sparse" key maps to an individual bit in the -// DispatchKeySet, while `SparseCPU`, `SparseCUDA`, etc all map to individual -// slots in the runtime operator table. +@Namespace("c10") public static native @ByRef @Name("operator /=") BFloat16 dividePut(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); -@Namespace("c10") public static native @Cast("const bool") boolean isPerBackendFunctionalityKey(DispatchKey k); -@Namespace("c10") public static native @Cast("const bool") boolean isPerBackendFunctionalityKey(@Cast("c10::DispatchKey") short k); +@Namespace("c10") public static native @ByRef @Name("operator |") BFloat16 or(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); -// Note that this includes Undefined in the total count. -// BUT EndOfFunctionalityKeys is its own (placeholder) key. -// e.g. Undefined=0, Dense=1, Sparse=2, EndOfFunctionalityKeys=3. -// In the above example, there are 3 total functionality keys. 
-@Namespace("c10") @MemberGetter public static native @Cast("const uint8_t") byte num_functionality_keys(); +@Namespace("c10") public static native @ByRef @Name("operator ^") BFloat16 xor(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); -@Namespace("c10") @MemberGetter public static native @Cast("const uint8_t") byte num_backends(); +@Namespace("c10") public static native @ByRef @Name("operator &") BFloat16 and(@ByRef BFloat16 a, @Const @ByRef BFloat16 b); -// Note [No More Than 16 Backends] -// Search for this note to find places in the code where the "no more than 16 -// backends" invariant is baked in. +/** Arithmetic with floats */ -@Namespace("c10") public static native @Cast("const uint8_t") byte numPerBackendFunctionalityKeys(); +@Namespace("c10") public static native @Name("operator +") float add(@ByVal BFloat16 a, float b); +@Namespace("c10") public static native @Name("operator -") float subtract(@ByVal BFloat16 a, float b); +@Namespace("c10") public static native @Name("operator *") float multiply(@ByVal BFloat16 a, float b); +@Namespace("c10") public static native @Name("operator /") float divide(@ByVal BFloat16 a, float b); -// #if defined(C10_MOBILE_TRIM_DISPATCH_KEYS) -// See [Note: Trimmed Mobile Dispatch Keys] -@Namespace("c10") @MemberGetter public static native @Cast("const uint16_t") short num_runtime_entries(); -// #else -// #endif +@Namespace("c10") public static native @Name("operator +") float add(float a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator -") float subtract(float a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator *") float multiply(float a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator /") float divide(float a, @ByVal BFloat16 b); -// See Note [No More Than 16 Backends] -@Namespace("c10") @MemberGetter public static native @Cast("const uint16_t") short full_backend_mask(); +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatPointer addPut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatBuffer addPut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator +=") float[] addPut(@ByRef float[] a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatPointer subtractPut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatBuffer subtractPut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator -=") float[] subtractPut(@ByRef float[] a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatPointer multiplyPut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatBuffer multiplyPut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator *=") float[] multiplyPut(@ByRef float[] a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatPointer dividePut(@ByRef FloatPointer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatBuffer dividePut(@ByRef FloatBuffer a, @Const @ByRef BFloat16 b); +@Namespace("c10") public static native @ByRef @Name("operator /=") float[] dividePut(@ByRef float[] a, @Const 
@ByRef BFloat16 b); -@Namespace("c10") public static native @Cast("const char*") BytePointer toString(DispatchKey arg0); -@Namespace("c10") public static native String toString(@Cast("c10::DispatchKey") short arg0); -@Namespace("c10") public static native @Cast("const char*") BytePointer toString(BackendComponent arg0); -@Namespace("c10") public static native String toString(@Cast("c10::BackendComponent") byte arg0); +/** Arithmetic with doubles */ +@Namespace("c10") public static native @Name("operator +") double add(@ByVal BFloat16 a, double b); +@Namespace("c10") public static native @Name("operator -") double subtract(@ByVal BFloat16 a, double b); +@Namespace("c10") public static native @Name("operator *") double multiply(@ByVal BFloat16 a, double b); +@Namespace("c10") public static native @Name("operator /") double divide(@ByVal BFloat16 a, double b); +@Namespace("c10") public static native @Name("operator +") double add(double a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator -") double subtract(double a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator *") double multiply(double a, @ByVal BFloat16 b); +@Namespace("c10") public static native @Name("operator /") double divide(double a, @ByVal BFloat16 b); -@Namespace("c10") public static native DispatchKey getAutogradKeyFromBackend(BackendComponent k); -@Namespace("c10") public static native @Cast("c10::DispatchKey") short getAutogradKeyFromBackend(@Cast("c10::BackendComponent") byte k); +/** Arithmetic with ints */ -// Parses a string into a dispatch key. -// If the string cannot be correctly parsed, throws an exception. -@Namespace("c10") public static native DispatchKey parseDispatchKey(@StdString BytePointer k); -@Namespace("c10") public static native @Cast("c10::DispatchKey") short parseDispatchKey(@StdString String k); +@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@ByVal BFloat16 a, int b); +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@ByVal BFloat16 a, int b); +@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@ByVal BFloat16 a, int b); +@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@ByVal BFloat16 a, int b); -// These are some convenience identifiers for dispatch keys which are -// shorter to type than their long counterparts. Note that some of these -// dispatch keys directly correspond to DeviceType; and most APIs that -// accept DispatchKey also accept DeviceType; e.g., -// torch::dispatch(torch::kCPU, ...) is also valid. -@Namespace("c10") @MemberGetter public static native DispatchKey kAutograd(); +@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(int a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(int a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(int a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(int a, @ByVal BFloat16 b); -// See Note [The Ordering of Per-Backend Dispatch Keys Matters!] -// This function relies on the invariant that the dispatch keys between -// StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend -// in the same order as `BackendComponent`. 
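+// The mixed-type overloads above widen on purpose: arithmetic between
+// BFloat16 and float/double returns the wider type, while the int overloads
+// return BFloat16. A common consequence is accumulating BFloat16 data in
+// float; a minimal sketch under the same assumptions as the earlier example
+// (the data and loop are illustrative):
+//
+//   BFloat16[] data = { new BFloat16(0.5f), new BFloat16(1.25f) };
+//   float acc = 0.0f;
+//   for (BFloat16 v : data) {
+//       acc = add(acc, v);              // float overload: add(float, BFloat16) -> float
+//   }
+//   BFloat16 total = new BFloat16(acc); // narrow once at the end if needed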
+//// Arithmetic with int64_t +@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@ByVal BFloat16 a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@ByVal BFloat16 a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@ByVal BFloat16 a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@ByVal BFloat16 a, @Cast("int64_t") long b); -@Namespace("c10") public static native DispatchKey toFunctionalityKey(DispatchKey k); -@Namespace("c10") public static native @Cast("c10::DispatchKey") short toFunctionalityKey(@Cast("c10::DispatchKey") short k); +@Namespace("c10") public static native @ByVal @Name("operator +") BFloat16 add(@Cast("int64_t") long a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator -") BFloat16 subtract(@Cast("int64_t") long a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator *") BFloat16 multiply(@Cast("int64_t") long a, @ByVal BFloat16 b); +@Namespace("c10") public static native @ByVal @Name("operator /") BFloat16 divide(@Cast("int64_t") long a, @ByVal BFloat16 b); +// Overloading < and > operators, because std::max and std::min use them. +@Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@ByRef BFloat16 lhs, @ByRef BFloat16 rhs); -// Given (DispatchKey::Dense, BackendComponent::CUDABit), returns -// DispatchKey::CUDA. -// See Note [The Ordering of Per-Backend Dispatch Keys Matters!] -// This function relies on the invariant that the dispatch keys between -// StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend -// in the same order as `BackendComponent`. -@Namespace("c10") public static native DispatchKey toRuntimePerBackendFunctionalityKey( - DispatchKey functionality_k, - BackendComponent backend_k); -@Namespace("c10") public static native @Cast("c10::DispatchKey") short toRuntimePerBackendFunctionalityKey( - @Cast("c10::DispatchKey") short functionality_k, - @Cast("c10::BackendComponent") byte backend_k); +@Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(@ByRef BFloat16 lhs, @ByRef BFloat16 rhs); // namespace c10 -// Expose the constant, but not the TYPE (DispatchKey is an implementation -// detail!) - // namespace torch -// NB: You really shouldn't use this instance; this enum is guaranteed -// to be pretty small so a regular array should be acceptable. // namespace std -// Parsed from c10/core/DispatchKeySet.h + +// Parsed from c10/util/TypeSafeSignMath.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// Targeting ../FunctionalityOffsetAndMask.java +// #include +// #include +// #include -// Targeting ../DispatchKeySet.java +// #if C10_CLANG_HAS_WARNING("-Wstring-conversion") +// #endif +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif +/** Returns false since we cannot have x < 0 if x is unsigned. */ +/** Returns true if a signed variable x < 0 */ -@Namespace("c10") public static native @StdString BytePointer toString(@ByVal DispatchKeySet arg0); +/** Returns true if x < 0 + * NOTE: Will fail on an unsigned custom type + * For the most part it's possible to fix this if + * the custom type has a constexpr constructor. 
+ * However, notably, c10::Half does not :-( */ +/** Returns the sign of an unsigned variable x as 0, 1 */ -@Namespace("c10") public static native int getDispatchTableIndexForDispatchKey(DispatchKey k); -@Namespace("c10") public static native int getDispatchTableIndexForDispatchKey(@Cast("c10::DispatchKey") short k); +/** Returns the sign of a signed variable x as -1, 0, 1 */ -// Alias key DispatchKey::Autograd maps to -// (autograd_dispatch_keyset x full_backend_mask) -// NB: keys in this set also get associated with CompositeImplicitAutograd -// -// Note [autograd_dispatch_keyset Does Not Include Backend Bits] -// We don't want to include any backend bits (BackendComponent::CPUBit, etc) -// directly in autograd_dispatch_keyset. -// Why? keysets like autograd_dispatch_keyset are commonly used to remove -// autograd keys from a DispatchKeySet throughout the code base. However, you -// are only allowed to remove functionality bits from a keyset, not backend -// bits. See Note [Removing keys from DispatchKeySet Only Affects Functionality -// Keys] for details. To be consistent and avoid confusion, we're explicitly -// setting up autograd_dispatch_keyset to not have any backend bits. -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet autograd_dispatch_keyset(); +/** Returns the sign of x as -1, 0, 1 + * NOTE: Will fail on an unsigned custom type + * For the most part it's possible to fix this if + * the custom type has a constexpr constructor. + * However, notably, c10::Half does not :-( */ -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet autocast_dispatch_keyset(); +/** Returns true if a and b are not both negative */ -// See Note [TLS Initialization] -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet default_included_set(); +// Suppress sign compare warning when compiling with GCC +// as later does not account for short-circuit rule before +// raising the warning, see https://godbolt.org/z/Tr3Msnz99 +// #ifdef __GNUC__ +// #pragma GCC diagnostic push +// #pragma GCC diagnostic ignored "-Wsign-compare" +// #endif -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet default_excluded_set(); +/** Returns true if x is greater than the greatest value of the type Limit */ -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet autograd_dispatch_keyset_with_ADInplaceOrView(); +// #ifdef __GNUC__ +// #pragma GCC diagnostic pop +// #endif -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet python_ks(); +/** Returns true if x < lowest(Limit). Standard comparison */ -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet sparse_ks(); +/** Returns false since all the limit is signed and therefore includes + * negative values but x cannot be negative because it is unsigned */ -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet sparse_csr_ks(); +/** Returns true if x < 0, where 0 is constructed from T. 
+ * Limit is not signed, so its lower value is zero */ -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet mkldnn_ks(); +/** Returns false since both types are unsigned */ -// backend dispatch keys that map to DispatchKey::AutogradOther -// NB: keys in this set also get associated with CompositeImplicitAutograd -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet autogradother_backends(); +/** Returns true if x is less than the lowest value of type T + * NOTE: Will fail on an unsigned custom type + * For the most part it's possible to fix this if + * the custom type has a constexpr constructor. + * However, notably, c10::Half does not : */ -// The set of dispatch keys that come after autograd -// n.b. this relies on the fact that AutogradOther is currently the lowest -// Autograd key -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet after_autograd_keyset(); + // namespace c10 -// The set of dispatch keys that come after ADInplaceOrView -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet after_ADInplaceOrView_keyset(); -// The set of dispatch keys that come after Functionalize -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet after_func_keyset(); -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet backend_bitset_mask(); -// keyset correpsonding to functorch keys that have their own dedicated -// TensorImpl subclass. +// Parsed from c10/util/complex_math.h -// This keyset has: -// (1) the functionality bits corresponding to backends (dense, sparse, -// quantized) (2) all of the backend bits set -@Namespace("c10") @MemberGetter public static native @Const @ByRef DispatchKeySet backend_functionality_keys(); -// Targeting ../OpTableOffsetAndMask.java +// #if !defined(C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H) +// #error +// "c10/util/complex_math.h is not meant to be individually included. Include c10/util/complex.h instead."
+// #endif +// Exponential functions +@Namespace("c10_complex_math") public static native @ByVal @Name("exp") FloatComplex exp(@Const @ByRef FloatComplex x); -// true if t is a backend dispatch key -@Namespace("c10") public static native @Cast("bool") boolean isBackendDispatchKey(DispatchKey t); -@Namespace("c10") public static native @Cast("bool") boolean isBackendDispatchKey(@Cast("c10::DispatchKey") short t); +@Namespace("c10_complex_math") public static native @ByVal @Name("exp") DoubleComplex exp(@Const @ByRef DoubleComplex x); -// Resolve alias dispatch key to DispatchKeySet if applicable -@Namespace("c10") public static native @ByVal DispatchKeySet getRuntimeDispatchKeySet(DispatchKey t); -@Namespace("c10") public static native @ByVal DispatchKeySet getRuntimeDispatchKeySet(@Cast("c10::DispatchKey") short t); +@Namespace("c10_complex_math") public static native @ByVal @Name("log") FloatComplex log(@Const @ByRef FloatComplex x); -// Resolve alias dispatch key to DispatchKeySet if applicable, -// and chek if k is a part of that set -@Namespace("c10") public static native @Cast("bool") boolean runtimeDispatchKeySetHas(DispatchKey t, DispatchKey k); -@Namespace("c10") public static native @Cast("bool") boolean runtimeDispatchKeySetHas(@Cast("c10::DispatchKey") short t, @Cast("c10::DispatchKey") short k); +@Namespace("c10_complex_math") public static native @ByVal @Name("log") DoubleComplex log(@Const @ByRef DoubleComplex x); -// Returns a DispatchKeySet of all backend keys mapped to Autograd dispatch key -// t, DispatchKeySet is empty if t is not alias of DispatchKey::Autograd. -@Namespace("c10") public static native @ByVal DispatchKeySet getBackendKeySetFromAutograd(DispatchKey t); -@Namespace("c10") public static native @ByVal DispatchKeySet getBackendKeySetFromAutograd(@Cast("c10::DispatchKey") short t); +@Namespace("c10_complex_math") public static native @ByVal @Name("log10") FloatComplex log10(@Const @ByRef FloatComplex x); -// Returns a DispatchKeySet of autograd related keys mapped to backend. -// for a given backend key, use the associated autograd key. -// for non-backend keys, use AutogradOther as a default. -// Note: it's convenient and fast to return a default here rather than (say) -// returning an optional, or throwing. But it makes callers -// responsible for either a) enforcing the invariant that only backend keys -// be passed as arguments, or b) interpreting our return value carefully. -@Namespace("c10") public static native @ByVal DispatchKeySet getAutogradRelatedKeySetFromBackend(BackendComponent t); -@Namespace("c10") public static native @ByVal DispatchKeySet getAutogradRelatedKeySetFromBackend(@Cast("c10::BackendComponent") byte t); +@Namespace("c10_complex_math") public static native @ByVal @Name("log10") DoubleComplex log10(@Const @ByRef DoubleComplex x); -// Returns a DispatchKeySet of autocast related keys mapped to backend. -@Namespace("c10") public static native @ByVal DispatchKeySet getAutocastRelatedKeySetFromBackend(BackendComponent t); -@Namespace("c10") public static native @ByVal DispatchKeySet getAutocastRelatedKeySetFromBackend(@Cast("c10::BackendComponent") byte t); +@Namespace("c10_complex_math") public static native @ByVal @Name("log2") FloatComplex log2(@Const @ByRef FloatComplex x); -// returns the "backend" DispatchKey of highest priority in the set. 
-// This is basically like highestBackendKey(), except that we have some -// "functionality" bits that correspond to backends (Sparse, Quantized) -@Namespace("c10") public static native DispatchKey highestPriorityBackendTypeId(@ByVal DispatchKeySet ks); +@Namespace("c10_complex_math") public static native @ByVal @Name("log2") DoubleComplex log2(@Const @ByRef DoubleComplex x); -// This API exists because we have a use case for checking -// getRuntimeDispatchKeySet(alias).has(DispatchKey::Undefined) -// in OperatorEntry.cpp but we disallow it in has() API. -@Namespace("c10") public static native @Cast("bool") boolean isIncludedInAlias(DispatchKey k, DispatchKey alias); -@Namespace("c10") public static native @Cast("bool") boolean isIncludedInAlias(@Cast("c10::DispatchKey") short k, @Cast("c10::DispatchKey") short alias); +// Power functions +// +// #if defined(_LIBCPP_VERSION) || +// (defined(__GLIBCXX__) && !defined(_GLIBCXX11_USE_C99_COMPLEX)) -// Historically, every tensor only had a single DispatchKey, and it was always -// something like CPU, and there wasn't any of this business where TLS -// could cause the DispatchKey of a tensor to change. But we still have some -// legacy code that is still using DispatchKey for things like instanceof -// checks; if at all possible, refactor the code to stop using DispatchKey in -// those cases. -@Namespace("c10") public static native DispatchKey legacyExtractDispatchKey(@ByVal DispatchKeySet s); -// Given a function type, constructs a function_traits type that drops the first -// parameter type if the first parameter is of type DispatchKeySet. NB: -// DispatchKeySet is currently explicitly hidden from JIT (mainly to avoid -// pushing unnecessary arguments on the stack - see Note [ Plumbing Keys Through -// the Dispatcher] for details). If at any point in the future we need to expose -// this type to JIT, revisit the usage of this type alias. - // namespace c10 -// Parsed from c10/core/Backend.h + // namespace _detail +// #endif -// #pragma once +@Namespace("c10_complex_math") public static native @ByVal @Name("sqrt") FloatComplex sqrt(@Const @ByRef FloatComplex x); -// #include -// #include -// #include -// #include +@Namespace("c10_complex_math") public static native @ByVal @Name("sqrt") DoubleComplex sqrt(@Const @ByRef DoubleComplex x); -// #include +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") FloatComplex pow( + @Const @ByRef FloatComplex x, + @Const @ByRef FloatComplex y); -/** - * This legacy enum class defines the set of backends supported by old school, - * code generated Type-based ATen. A "backend" in this sense roughly - * corresponds to the cartesian product of (device type, layout), but restricted - * only to combinations which we actually have kernels for. Backend does NOT - * include dtype. - * - * The reason we are sunsetting this enum class is because it doesn't allow for - * open registration; e.g., if you want to add SparseXLA, you'd have to - * edit this enum; you wouldn't be able to do it out of tree. DispatchKey is - * the replacement for Backend which supports open registration. - * - * NB: The concept of 'Backend' here disagrees with the notion of backend - * exposed to users in torch.backends. Backend here is something like "CPU" - * or "SparseCUDA"; backend in torch.backends is something like "MKL" or - * "CUDNN". 
- */ -@Namespace("c10") public enum Backend { - CPU(0), - CUDA(1), - HIP(2), - VE(3), - FPGA(4), - IPU(5), - XPU(6), - SparseCPU(7), - SparseCUDA(8), - SparseCsrCPU(9), - SparseCsrCUDA(10), - SparseHIP(11), - SparseVE(12), - SparseXPU(13), - ORT(14), - XLA(15), - Vulkan(16), - Metal(17), - Meta(18), - QuantizedCPU(19), - QuantizedCUDA(20), - QuantizedXPU(21), - Undefined(22), - MkldnnCPU(23), - MPS(24), - HPU(25), - Lazy(26), - MTIA(27), - PrivateUse1(28), - NumOptions(29); +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") DoubleComplex pow( + @Const @ByRef DoubleComplex x, + @Const @ByRef DoubleComplex y); - public final int value; - private Backend(int v) { this.value = v; } - private Backend(Backend e) { this.value = e.value; } - public Backend intern() { for (Backend e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") FloatComplex pow( + @Const @ByRef FloatComplex x, + float y); -@Namespace("c10") public static native Backend dispatchKeyToBackend(DispatchKey t); -@Namespace("c10") public static native @Cast("c10::Backend") int dispatchKeyToBackend(@Cast("c10::DispatchKey") short t); +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") DoubleComplex pow( + @Const @ByRef DoubleComplex x, + double y); -@Namespace("c10") public static native DispatchKey backendToDispatchKey(Backend b); -@Namespace("c10") public static native @Cast("c10::DispatchKey") short backendToDispatchKey(@Cast("c10::Backend") int b); +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") FloatComplex pow( + float x, + @Const @ByRef FloatComplex y); -@Namespace("c10") public static native DeviceType backendToDeviceType(Backend b); -@Namespace("c10") public static native @Cast("c10::DeviceType") byte backendToDeviceType(@Cast("c10::Backend") int b); +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") DoubleComplex pow( + double x, + @Const @ByRef DoubleComplex y); -// TODO: This probably shouldn't actually be static inline -@Namespace("c10") public static native @Cast("const char*") BytePointer toString(Backend b); -@Namespace("c10") public static native String toString(@Cast("c10::Backend") int b); +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") DoubleComplex pow(@Const @ByRef DoubleComplex x, @Const @ByRef FloatComplex y); +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") DoubleComplex pow(@Const @ByRef FloatComplex x, @Const @ByRef DoubleComplex y); -@Namespace("c10") public static native @Cast("bool") boolean isSparse(Backend b); -@Namespace("c10") public static native @Cast("bool") boolean isSparse(@Cast("c10::Backend") int b); +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") DoubleComplex pow(@Const @ByRef DoubleComplex x, @Const @ByRef float y); +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") DoubleComplex pow(@Const @ByRef FloatComplex x, @Const @ByRef double y); -@Namespace("c10") public static native @Cast("bool") boolean isSparseCsr(Backend b); -@Namespace("c10") public static native @Cast("bool") boolean isSparseCsr(@Cast("c10::Backend") int b); +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") DoubleComplex pow(@Const @ByRef double x, @Const @ByRef FloatComplex y); +@Namespace("c10_complex_math") public static native @ByVal @Name("pow") DoubleComplex pow(@Const 
@ByRef float x, @Const @ByRef DoubleComplex y); - // namespace c10 +// Trigonometric functions +@Namespace("c10_complex_math") public static native @ByVal @Name("sin") FloatComplex sin(@Const @ByRef FloatComplex x); -// Parsed from c10/core/CopyBytes.h +@Namespace("c10_complex_math") public static native @ByVal @Name("sin") DoubleComplex sin(@Const @ByRef DoubleComplex x); -// #pragma once +@Namespace("c10_complex_math") public static native @ByVal @Name("cos") FloatComplex cos(@Const @ByRef FloatComplex x); -// #include -// Targeting ../CopyBytesFunction.java +@Namespace("c10_complex_math") public static native @ByVal @Name("cos") DoubleComplex cos(@Const @ByRef DoubleComplex x); +@Namespace("c10_complex_math") public static native @ByVal @Name("tan") FloatComplex tan(@Const @ByRef FloatComplex x); -// Targeting ../_CopyBytesFunctionRegisterer.java +@Namespace("c10_complex_math") public static native @ByVal @Name("tan") DoubleComplex tan(@Const @ByRef DoubleComplex x); +@Namespace("c10_complex_math") public static native @ByVal @Name("asin") FloatComplex asin(@Const @ByRef FloatComplex x); +@Namespace("c10_complex_math") public static native @ByVal @Name("asin") DoubleComplex asin(@Const @ByRef DoubleComplex x); -// #define REGISTER_COPY_BYTES_FUNCTION(from, to, ...) -// namespace { -// static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( -// g_copy_function)(from, to, __VA_ARGS__); -// } +@Namespace("c10_complex_math") public static native @ByVal @Name("acos") FloatComplex acos(@Const @ByRef FloatComplex x); -/* - * WARNING: Implementations for this function are currently registered from - * ATen and caffe2, not yet from c10. Don't use this if not either ATen - * or caffe2 is present as well. - * We can't move them yet, because the CUDA implementations aren't unified yet - * between ATen and caffe2. - * We're planning to move the implementations into c10/backend/xxx - * to make c10 self contained again. 
- */ -@Namespace("c10") public static native void CopyBytes( - @Cast("size_t") long nbytes, - @Const Pointer src, - @ByVal Device src_device, - Pointer dst, - @ByVal Device dst_device, - @Cast("bool") boolean async); - // namespace c10 +@Namespace("c10_complex_math") public static native @ByVal @Name("acos") DoubleComplex acos(@Const @ByRef DoubleComplex x); +@Namespace("c10_complex_math") public static native @ByVal @Name("atan") FloatComplex atan(@Const @ByRef FloatComplex x); -// Parsed from c10/core/GradMode.h +@Namespace("c10_complex_math") public static native @ByVal @Name("atan") DoubleComplex atan(@Const @ByRef DoubleComplex x); -// #pragma once +// Hyperbolic functions -// #include -// #include -// Targeting ../GradMode.java +@Namespace("c10_complex_math") public static native @ByVal @Name("sinh") FloatComplex sinh(@Const @ByRef FloatComplex x); +@Namespace("c10_complex_math") public static native @ByVal @Name("sinh") DoubleComplex sinh(@Const @ByRef DoubleComplex x); -// Targeting ../AutoGradMode.java +@Namespace("c10_complex_math") public static native @ByVal @Name("cosh") FloatComplex cosh(@Const @ByRef FloatComplex x); +@Namespace("c10_complex_math") public static native @ByVal @Name("cosh") DoubleComplex cosh(@Const @ByRef DoubleComplex x); -// Targeting ../NoGradGuard.java +@Namespace("c10_complex_math") public static native @ByVal @Name("tanh") FloatComplex tanh(@Const @ByRef FloatComplex x); +@Namespace("c10_complex_math") public static native @ByVal @Name("tanh") DoubleComplex tanh(@Const @ByRef DoubleComplex x); -// Targeting ../AutoFwGradMode.java +@Namespace("c10_complex_math") public static native @ByVal @Name("asinh") FloatComplex asinh(@Const @ByRef FloatComplex x); +@Namespace("c10_complex_math") public static native @ByVal @Name("asinh") DoubleComplex asinh(@Const @ByRef DoubleComplex x); +@Namespace("c10_complex_math") public static native @ByVal @Name("acosh") FloatComplex acosh(@Const @ByRef FloatComplex x); - // namespace c10 +@Namespace("c10_complex_math") public static native @ByVal @Name("acosh") DoubleComplex acosh(@Const @ByRef DoubleComplex x); +@Namespace("c10_complex_math") public static native @ByVal @Name("atanh") FloatComplex atanh(@Const @ByRef FloatComplex x); -// Parsed from c10/core/InferenceMode.h +@Namespace("c10_complex_math") public static native @ByVal @Name("atanh") DoubleComplex atanh(@Const @ByRef DoubleComplex x); -// #pragma once +@Namespace("c10_complex_math") public static native @ByVal @Name("log1p") FloatComplex log1p(@Const @ByRef FloatComplex z); -// #include -// #include -// #include -// #include -// Targeting ../InferenceMode.java +@Namespace("c10_complex_math") public static native @ByVal @Name("log1p") DoubleComplex log1p(@Const @ByRef DoubleComplex z); + // namespace c10_complex_math - // namespace c10 + // namespace std -// Parsed from c10/core/Layout.h +// Parsed from c10/util/Half.h // #pragma once -// #include -// #include +/** Defines the Half type (half-precision floating-point) including conversions + * to standard C types and basic arithmetic operations. Note that arithmetic + * operations are implemented by converting to floating point and + * performing the operation in float32, instead of using CUDA half intrinsics. + * Most uses of this type within ATen are memory bound, including the + * element-wise kernels, and the half intrinsics aren't efficient on all GPUs. + * If you are writing a compute bound kernel, you can use the CUDA half + * intrinsics directly on the Half type from device code. 
*/ -// #include -@Namespace("c10") public enum Layout { - Strided((byte)(0)), - Sparse((byte)(1)), - SparseCsr((byte)(2)), - Mkldnn((byte)(3)), - SparseCsc((byte)(4)), - SparseBsr((byte)(5)), - SparseBsc((byte)(6)), - NumOptions((byte)(7)); +// #include +// #include +// #include +// #include +// #include - public final byte value; - private Layout(byte v) { this.value = v; } - private Layout(Layout e) { this.value = e.value; } - public Layout intern() { for (Layout e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} +// #if defined(__cplusplus) && (__cplusplus >= 201103L) +// #include +// #include +// #elif !defined(__OPENCL_VERSION__) +// #include +// #include +// #endif -@Namespace("c10") public static native Layout layout_from_backend(Backend backend); -@Namespace("c10") public static native @Cast("c10::Layout") byte layout_from_backend(@Cast("c10::Backend") int backend); +// #ifdef _MSC_VER +// #include +// #endif +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #ifdef __CUDACC__ +// #include +// #endif - // namespace c10 +// #ifdef __HIPCC__ +// #include +// #endif +// #if defined(CL_SYCL_LANGUAGE_VERSION) +// #include // for SYCL 1.2.1 +// #elif defined(SYCL_LANGUAGE_VERSION) +// #include // for SYCL 2020 +// #endif -// Parsed from c10/core/MemoryFormat.h +// Standard check for compiling CUDA with clang +// #if defined(__clang__) && defined(__CUDA__) && defined(__CUDA_ARCH__) +// #define C10_DEVICE_HOST_FUNCTION __device__ __host__ +// #else +// #define C10_DEVICE_HOST_FUNCTION +// #endif -// #pragma once +// #include // operator typeid -// #include -// #include -// #include +@Namespace("c10::detail") public static native float fp32_from_bits(@Cast("uint32_t") int w); -// #include +@Namespace("c10::detail") public static native @Cast("uint32_t") int fp32_to_bits(float f); -// Memory format is not the property of a Tensor. It is the way to tell an -// operator how the result should be organized in memory and nothing more. That -// means memory format should never be used as return value for any tensor state -// interrogation functions (internally and externally). -// -// Possible options are: -// Preserve: -// If any of the input tensors is in channels_last format, operator output -// should be in channels_last format -// -// Contiguous: -// Regardless of input tensors format, the output should be contiguous -// Tensor. -// -// ChannelsLast: -// Regardless of input tensors format, the output should be in channels_last -// format. -@Namespace("c10") public enum MemoryFormat { - Contiguous((byte)(0)), - Preserve((byte)(1)), - ChannelsLast((byte)(2)), - ChannelsLast3d((byte)(3)), - NumOptions((byte)(4)); +/* + * Convert a 16-bit floating-point number in IEEE half-precision format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. 
+ */ +@Namespace("c10::detail") public static native @Cast("uint32_t") int fp16_ieee_to_fp32_bits(@Cast("uint16_t") short h); - public final byte value; - private MemoryFormat(byte v) { this.value = v; } - private MemoryFormat(MemoryFormat e) { this.value = e.value; } - public MemoryFormat intern() { for (MemoryFormat e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} +/* + * Convert a 16-bit floating-point number in IEEE half-precision format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format. + * + * @note The implementation relies on IEEE-like (no assumption about rounding + * mode and no operations on denormals) floating-point operations and bitcasts + * between integer and floating-point variables. + */ +@Namespace("c10::detail") public static native float fp16_ieee_to_fp32_value(@Cast("uint16_t") short h); -// If you are seeing this, it means that this call site was not checked if -// the memory format could be preserved, and it was switched to old default -// behaviour of contiguous -// #define LEGACY_CONTIGUOUS_MEMORY_FORMAT c10::get_contiguous_memory_format() +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a + * 16-bit floating-point number in IEEE half-precision format, in bit + * representation. + * + * @note The implementation relies on IEEE-like (no assumption about rounding + * mode and no operations on denormals) floating-point operations and bitcasts + * between integer and floating-point variables. + */ +@Namespace("c10::detail") public static native @Cast("uint16_t") short fp16_ieee_from_fp32_value(float f); -@Namespace("c10") public static native MemoryFormat get_contiguous_memory_format(); +// Targeting ../Half.java -// Note: Hardcoded the channel last stride indices here to get better -// performance +// Targeting ../HalfComplex.java -@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_2d(@ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes); -@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_2d(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); -@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_3d(@ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes); -@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector get_channels_last_strides_3d(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); -// NOTE: -// Below are Helper functions for is_channels_last_strides_xd. -// 1. Please do not combine these helper functions, each helper function handles -// exactly one case of sizes + memory_format, by doing this, the strides indices -// will be a constant array and we can access it using constant index number, -// the compiler will fully unroll the loop on strides indices to gain a better -// performance. -// 2. No error check in helper function, caller ensures the correctness of the -// input -// 3. All helper functions have similar comments, only 1st helper function is -// commented here. +// In some versions of MSVC, there will be a compiler error when building. +// C4146: unary minus operator applied to unsigned type, result still unsigned +// C4804: unsafe use of type 'bool' in operation +// It can be addressed by disabling the following warning. 
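// Hedged usage sketch, not part of the generated file: a round trip through
// the IEEE half-precision helpers declared above. The signatures are taken
// verbatim from the declarations; the only assumption is the usual JavaCPP
// layout, i.e. that they surface as static methods of
// org.bytedeco.pytorch.global.torch.

import org.bytedeco.pytorch.global.torch;

public class HalfBitsRoundTrip {
    public static void main(String[] args) {
        short bits = torch.fp16_ieee_from_fp32_value(1.5f); // float32 -> fp16 bit pattern
        float back = torch.fp16_ieee_to_fp32_value(bits);   // fp16 bit pattern -> float32
        System.out.println(back); // prints 1.5: exactly representable in fp16, so the round trip is lossless
    }
}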
+// #ifdef _MSC_VER +// #pragma warning(push) +// #pragma warning(disable : 4146) +// #pragma warning(disable : 4804) +// #pragma warning(disable : 4018) +// #endif -// Note [Ambiguous is_channels_last_strides_xd] -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// The flaw of carrying memory_format implicitly through strides is very hard -// to WAR properly. issue #24090 -// Without the history of permutation, we can't infer the memory_format of a -// tensor from the snapshot of its size & stride -// e.g. -// -// 1. We can NOT specify the memory_format of N111 tensor through strides in a -// meaningful way; -// -// 2. Two path that ended up with identical size/stride -// N11W contiguous tensor sliced at w-dimension becomes [N,1,1,1]@[W,W,W,W] -// NC11 channels_last tensor sliced at c-dimension becomes [N,1,1,1]@[C,C,C,C] -// So if we see a tensor [N,1,1,1]@[X,X,X,X], there's no way for us to infer -// the memory_format of the original tensor. -// -// Due to the limitations, our temporary WAR `is_channels_last_strides` does the -// best effort to infer whether the original memory_format of a tensor is -// at::MemoryFormat::ChannelsLast. The two objectives of this function (ordered -// by their importance): -// 1. Ensure that normal shape manipulation does not accidentally change the -// MemoryFormat of an existing tensor. -// 2. Allows user to mark MemoryFormat::ChannelsLast to tensors; -// -// The function does so via checking strides of the tensor, including strides of -// size-1 dimensions. Although conventionally PyTorch implies no restriction on -// trivial stride (stride for size-1 dimension). -// -// Note that this approach is a compromise. We did not solve the problem -// completely. Many cases we will not be able to infer the correct memory -// format. -// The implementation of `is_channels_last_strides` is to serve the objectives: -// MemoryFormat::ChannelsLast has to be explicitly opted-in (no accidental -// conversion); Best effort to maintain the ChannelsLast flag. -// -// Due to the fact that this is not a bulletproof solution, through testing -// (aten/src/ATen/test/memory_format_test.cpp) -// a. we ensure that the common tasks are supported; -// a. we identify corner cases where the implementation compromises on. -// -// By the time accumulated permutation is enabled to replace implicit -// memory_format through strides, we should be updating our tests and fix the -// issues in our tests. -// -// We use Channels Last 2d as an example above. -// This is a general problem for all the is_channels_last_strides_xd -// implementation. Please check the helper functions -// (is_channels_last_strides_*d_s*) for more details. +// The overflow checks may involve float to int conversion which may +// trigger precision loss warning. Re-enable the warning once the code +// is fixed. See T58053069. +// #ifdef __clang__ +// #pragma GCC diagnostic push +// #pragma GCC diagnostic ignored "-Wunknown-warning-option" +// #pragma GCC diagnostic ignored "-Wimplicit-int-float-conversion" +// #endif -@Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_2d( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides); -@Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_2d( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); +// bool can be converted to any type. 
+// Without specializing on bool, in pytorch_linux_trusty_py2_7_9_build: +// `error: comparison of constant '255' with boolean expression is always false` +// for `f > limit::max()` below -@Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_3d( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides); -@Namespace("c10") public static native @Cast("bool") boolean is_channels_last_strides_3d( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); +// skip isnan and isinf check for integral types + +// #ifdef __clang__ +// #pragma GCC diagnostic pop +// #endif + +// #ifdef _MSC_VER +// #pragma warning(pop) +// #endif + +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Half value); // namespace c10 +// #include // IWYU pragma: keep + -// Parsed from c10/core/QEngine.h +// Parsed from c10/util/Half-inl.h // #pragma once -// #include -// #include -// #include +// #include +// #include +// #include -/** - * QEngine is an enum that is used to select the engine to run quantized ops. - * Keep this enum in sync with get_qengine_id() in - * torch/backends/quantized/__init__.py - */ -@Namespace("c10") public enum QEngine { - NoQEngine((byte)(0)), - FBGEMM((byte)(1)), - QNNPACK((byte)(2)), - ONEDNN((byte)(3)), - X86((byte)(4)); +// #ifdef __CUDACC__ +// #include +// #endif - public final byte value; - private QEngine(byte v) { this.value = v; } - private QEngine(QEngine e) { this.value = e.value; } - public QEngine intern() { for (QEngine e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} +// #ifdef __HIPCC__ +// #include +// #endif -@Namespace("c10") public static native @StdString BytePointer toString(QEngine qengine); +// #if defined(CL_SYCL_LANGUAGE_VERSION) +// #include // for SYCL 1.2.1 +// #elif defined(SYCL_LANGUAGE_VERSION) +// #include // for SYCL 2020 +// #endif - // namespace c10 +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif +/** Constructors */ -// Parsed from c10/core/QScheme.h -// #pragma once -// #include -// #include +/** Implicit conversions */ -/** - * QScheme is an enum that specifies the type of quantization. This has a one - * to one correspondence with Quantizer - * Please refer to ATen/quantized/Quantizer.h to see the Quantizers classes. 
- * Keep this file in sync with torch/nn/_qscheme.py - */ -@Namespace("c10") public enum QScheme { - PER_TENSOR_AFFINE((byte)(0)), - PER_CHANNEL_AFFINE((byte)(1)), - PER_TENSOR_SYMMETRIC((byte)(2)), - PER_CHANNEL_SYMMETRIC((byte)(3)), - PER_CHANNEL_AFFINE_FLOAT_QPARAMS((byte)(4)), - COMPILE_TIME_NUM_QSCHEMES((byte)(5)); - public final byte value; - private QScheme(byte v) { this.value = v; } - private QScheme(QScheme e) { this.value = e.value; } - public QScheme intern() { for (QScheme e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -@Namespace("c10") @MemberGetter public static native int COMPILE_TIME_NUM_QSCHEMES(); -@Namespace("c10") public static native @StdString BytePointer toString(QScheme qscheme); +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif - // namespace c10 +// #ifdef SYCL_LANGUAGE_VERSION +// #endif +// CUDA intrinsics -// Parsed from c10/core/Stream.h +// #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350)) || +// (defined(__clang__) && defined(__CUDA__)) -// #pragma once +// #endif -// #include +/** Arithmetic */ -/** An index representing a specific stream. A StreamId is not independently - * meaningful without knowing the Device it is associated with; try to - * use Stream rather than StreamId directly. - * - * StreamIds are opaque; they are assigned by some DeviceType-specific - * numbering system which is not visible to the user. HOWEVER, we - * guarantee that StreamId 0 is always a valid stream, and corresponds - * to some sort of "default" stream. */ -// Targeting ../StreamData3.java +@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@Const @ByRef Half a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@Const @ByRef Half a, @Const @ByRef Half b); -// Targeting ../Stream.java +@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@Const @ByRef Half a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@Const @ByRef Half a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@Const @ByRef Half a); +@Namespace("c10") public static native @ByRef @Name("operator +=") Half addPut(@ByRef Half a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator -=") Half subtractPut(@ByRef Half a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator *=") Half multiplyPut(@ByRef Half a, @Const @ByRef Half b); -// Targeting ../StreamHash.java +@Namespace("c10") public static native @ByRef @Name("operator /=") Half dividePut(@ByRef Half a, @Const @ByRef Half b); +/** Arithmetic with floats */ - // namespace std +@Namespace("c10") public static native @Name("operator +") float add(@ByVal Half a, float b); +@Namespace("c10") public static native @Name("operator -") float subtract(@ByVal Half a, float b); +@Namespace("c10") public static native @Name("operator *") float multiply(@ByVal Half a, float b); +@Namespace("c10") public static native @Name("operator /") float divide(@ByVal Half a, float b); +@Namespace("c10") public static native @Name("operator +") float add(float a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator -") float subtract(float a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator *") float multiply(float a, @ByVal Half b); +@Namespace("c10") public static native 
@Name("operator /") float divide(float a, @ByVal Half b); -// Parsed from c10/core/ScalarType.h +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatPointer addPut(@ByRef FloatPointer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator +=") FloatBuffer addPut(@ByRef FloatBuffer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator +=") float[] addPut(@ByRef float[] a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatPointer subtractPut(@ByRef FloatPointer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator -=") FloatBuffer subtractPut(@ByRef FloatBuffer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator -=") float[] subtractPut(@ByRef float[] a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatPointer multiplyPut(@ByRef FloatPointer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator *=") FloatBuffer multiplyPut(@ByRef FloatBuffer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator *=") float[] multiplyPut(@ByRef float[] a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatPointer dividePut(@ByRef FloatPointer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator /=") FloatBuffer dividePut(@ByRef FloatBuffer a, @Const @ByRef Half b); +@Namespace("c10") public static native @ByRef @Name("operator /=") float[] dividePut(@ByRef float[] a, @Const @ByRef Half b); -// #pragma once +/** Arithmetic with doubles */ -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("c10") public static native @Name("operator +") double add(@ByVal Half a, double b); +@Namespace("c10") public static native @Name("operator -") double subtract(@ByVal Half a, double b); +@Namespace("c10") public static native @Name("operator *") double multiply(@ByVal Half a, double b); +@Namespace("c10") public static native @Name("operator /") double divide(@ByVal Half a, double b); -// #include -// #include -// #include +@Namespace("c10") public static native @Name("operator +") double add(double a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator -") double subtract(double a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator *") double multiply(double a, @ByVal Half b); +@Namespace("c10") public static native @Name("operator /") double divide(double a, @ByVal Half b); -// For the macros below: -// NB: If you want to macro some code for all non-QInt scalar types (i.e. types -// with complete information, you probably want one of the -// AT_FORALL_SCALAR_TYPES / AT_FORALL_SCALAR_TYPES_AND -// macros below, which are designed to behave similarly to the Dispatch macros -// with the same name. +/** Arithmetic with ints */ -// NB: Order matters for this macro; it is relied upon in -// _promoteTypesLookup and the serialization format. 
-// #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(_) -// _(uint8_t, Byte) /* 0 */ -// _(int8_t, Char) /* 1 */ -// _(int16_t, Short) /* 2 */ -// _(int, Int) /* 3 */ -// _(int64_t, Long) /* 4 */ -// _(at::Half, Half) /* 5 */ -// _(float, Float) /* 6 */ -// _(double, Double) /* 7 */ -// _(c10::complex, ComplexHalf) /* 8 */ +@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@ByVal Half a, int b); +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@ByVal Half a, int b); +@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@ByVal Half a, int b); +@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@ByVal Half a, int b); + +@Namespace("c10") public static native @ByVal @Name("operator +") Half add(int a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(int a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(int a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(int a, @ByVal Half b); + +//// Arithmetic with int64_t + +@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@ByVal Half a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@ByVal Half a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@ByVal Half a, @Cast("int64_t") long b); +@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@ByVal Half a, @Cast("int64_t") long b); + +@Namespace("c10") public static native @ByVal @Name("operator +") Half add(@Cast("int64_t") long a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator -") Half subtract(@Cast("int64_t") long a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator *") Half multiply(@Cast("int64_t") long a, @ByVal Half b); +@Namespace("c10") public static native @ByVal @Name("operator /") Half divide(@Cast("int64_t") long a, @ByVal Half b); + +/** NOTE: we do not define comparisons directly and instead rely on the implicit + * conversion from c10::Half to float. */ + + // namespace c10 + + // namespace std + + + +// Parsed from c10/util/complex_utils.h + +// #if !defined(C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H) +// #error +// "c10/util/complex_utils.h is not meant to be individually included. Include c10/util/complex.h instead." +// #endif + +// #include + +// Extract double from std::complex; is identity otherwise +// TODO: Write in more idiomatic C++17 + + // namespace c10 + + // namespace std + + +// Parsed from c10/util/complex.h + +// #pragma once + +// #include + +// #include + +// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif + +// #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") +// #endif +// #if C10_CLANG_HAS_WARNING("-Wfloat-conversion") +// #endif +// Targeting ../DoubleComplex.java + + +// Targeting ../FloatComplex.java + + + + + + + + + + + + // namespace complex_literals + +// Define operators between integral scalars and c10::complex. std::complex does +// not support this when T is a floating-point number. This is useful because it +// saves a lot of "static_cast" when operate a complex and an integer. This +// makes the code both less verbose and potentially more efficient. 
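// Hedged usage sketch, not part of the generated file: exercising the
// c10_complex_math bindings declared earlier in this file. The sin and pow
// signatures are copied from those declarations; the DoubleComplex(re, im)
// constructor is an assumption mirroring c10::complex<double> and may differ
// in the generated class.

import org.bytedeco.pytorch.DoubleComplex;
import static org.bytedeco.pytorch.global.torch.*;

public class ComplexMathSketch {
    public static void main(String[] args) {
        DoubleComplex z = new DoubleComplex(1.0, 2.0); // assumed constructor: 1 + 2i
        DoubleComplex s = sin(z);                      // c10_complex_math::sin overload
        DoubleComplex p = pow(z, 2.0);                 // complex base, double exponent overload
    }
}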
+// #define COMPLEX_INTEGER_OP_TEMPLATE_CONDITION +// typename std::enable_if_t< +// std::is_floating_point::value && std::is_integral::value, +// int> = 0 + +// #undef COMPLEX_INTEGER_OP_TEMPLATE_CONDITION + + // namespace c10 + +// std functions +// +// The implementation of these functions also follow the design of C++20 + +// #if defined(USE_ROCM) +// #else +// #define ROCm_Bug(x) x +// #endif + +// #undef ROCm_Bug + +// For std::conj, there are other versions of it: +// constexpr std::complex conj( float z ); +// template< class DoubleOrInteger > +// constexpr std::complex conj( DoubleOrInteger z ); +// constexpr std::complex conj( long double z ); +// These are not implemented +// TODO(@zasdfgbnm): implement them as c10::conj + +// Thrust does not have complex --> complex version of thrust::proj, +// so this function is not implemented at c10 right now. +// TODO(@zasdfgbnm): implement it by ourselves + +// There is no c10 version of std::polar, because std::polar always +// returns std::complex. Use c10::polar instead; + + // namespace std + + // namespace c10 + +// #define C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H +// math functions are included in a separate file +// #include // IWYU pragma: keep +// utilities for complex types +// #include // IWYU pragma: keep +// #undef C10_INTERNAL_INCLUDE_COMPLEX_REMAINING_H + + +// Parsed from c10/util/qint32.h + +// #pragma once +// #include + +// #include +// Targeting ../qint32.java + + + + // namespace c10 + + +// Parsed from c10/util/qint8.h + +// #pragma once +// #include + +// #include +// Targeting ../qint8.java + + + + // namespace c10 + + +// Parsed from c10/util/quint2x4.h + +// #pragma once +// #include + +// #include +// Targeting ../quint2x4.java + + + + // namespace c10 + + +// Parsed from c10/util/quint4x2.h + +// #pragma once +// #include + +// #include +// Targeting ../quint4x2.java + + + + // namespace c10 + + +// Parsed from c10/util/quint8.h + +// #pragma once +// #include + +// #include +// Targeting ../quint8.java + + + + // namespace c10 + + +// Parsed from c10/core/ScalarType.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include + +// For the macros below: +// NB: If you want to macro some code for all non-QInt scalar types (i.e. types +// with complete information, you probably want one of the +// AT_FORALL_SCALAR_TYPES / AT_FORALL_SCALAR_TYPES_AND +// macros below, which are designed to behave similarly to the Dispatch macros +// with the same name. + +// NB: Order matters for this macro; it is relied upon in +// _promoteTypesLookup and the serialization format. 
+// #define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(_) +// _(uint8_t, Byte) /* 0 */ +// _(int8_t, Char) /* 1 */ +// _(int16_t, Short) /* 2 */ +// _(int, Int) /* 3 */ +// _(int64_t, Long) /* 4 */ +// _(at::Half, Half) /* 5 */ +// _(float, Float) /* 6 */ +// _(double, Double) /* 7 */ +// _(c10::complex, ComplexHalf) /* 8 */ // _(c10::complex, ComplexFloat) /* 9 */ // _(c10::complex, ComplexDouble) /* 10 */ // _(bool, Bool) /* 11 */ @@ -5480,7 +5150,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10") public static native @Cast("bool") boolean isIntegralType(ScalarType t, @Cast("bool") boolean includeBool); -@Namespace("c10") public static native @Cast("bool") @Deprecated boolean isIntegralType(ScalarType t); + @Namespace("c10") public static native @Cast("bool") boolean isFloatingType(ScalarType t); @@ -5506,7 +5176,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10") public static native ScalarType promoteTypes(ScalarType a, ScalarType b); - +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + ScalarType scalar_type); // #define AT_FORAUTOCAST_SCALAR_TYPES(_) // _(half, Half) /* 0 */ @@ -5515,99 +5187,90 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/core/ScalarTypeToTypeMeta.h +// Parsed from c10/util/ExclusivelyOwned.h // #pragma once -// #include -// #include -// #include - -// these just expose TypeMeta/ScalarType bridge functions in c10 -// TODO move to typeid.h (or codemod away) when TypeMeta et al -// are moved from caffe2 to c10 (see note at top of typeid.h) - -/** - * convert ScalarType enum values to TypeMeta handles - */ -@Namespace("c10") public static native @ByVal TypeMeta scalarTypeToTypeMeta(ScalarType scalar_type); - -/** - * convert TypeMeta handles to ScalarType enum values - */ -@Namespace("c10") public static native ScalarType typeMetaToScalarType(@ByVal TypeMeta dtype); - -/** - * typeMetaToScalarType(), lifted to optional - */ -@Namespace("c10") public static native @ByVal ScalarTypeOptional optTypeMetaToScalarType( - @ByVal TypeMetaOptional type_meta); - -/** - * convenience: equality across TypeMeta/ScalarType conversion - */ - - - - - - +// #include +// See example implementation in TensorBase.h and TensorBody.h. +// Synopsis: +// +// repr_type -- type to use to store an owned T in ExclusivelyOwned. +// +// pointer_type -- pointer-esque type to return from +// ExclusivelyOwned's get() and operator*() methods. +// +// const_pointer_type -- similar to pointer_type, used for the const methods. +// +// static repr_type nullRepr() -- return a null instance of repr_type. +// +// template +// static repr_type createInPlace(Args&&... args) -- used by the in-place +// ExclusivelyOwned constructor. +// +// static repr_type moveToRepr(T&& x) -- move the given x into an +// instance of repr_type. used by the ExclusivelyOwned(T&&) +// constructor. +// +// static void destroyOwned(repr_type x) -- free memory for a +// known-exclusively-owned instance of x. Replaces calling repr_type's +// destructor. Being able to implement this more efficiently than +// repr_type's destructor is the main reason to use ExclusivelyOwned +// for a type. +// +// static T take(repr_type&) -- move out of the given repr_type into an owned T. +// +// static pointer_type getImpl(const repr_type&) -- return a pointer +// to the given repr_type. 
May take repr_type by value if that is more +// efficient. + +/** ExclusivelyOwned is a smart-pointer-like wrapper around an + * exclusively-owned instance of some type T that normally has + * mandatory reference counting (currently just Tensor). If you have + * an isolated piece of code that knows that it has sole ownership of + * an object of one of these types (i.e., because you created it + * directly or using a factory function) and that object will not + * escape from that isolated piece of code, then moving the object + * into an ExclusivelyOwned will avoid an atomic reference count + * decrement at destruction time. + * + * If you directly create the Tensor in the first + * place, you can use the in_place constructor of ExclusivelyOwned to + * additionally avoid doing any stores to initialize the refcount & + * weakcount. */ // namespace c10 -// Parsed from c10/core/Scalar.h +// Parsed from c10/util/MaybeOwned.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include // #include // #include -// #include -// #include -// #include +// #include -// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") -// #endif -// Targeting ../Scalar.java +// #include +// Targeting ../MaybeOwnedTraitsGenericImplTensor.java -// define the scalar.to() specializations -// #define DEFINE_TO(T, name) -// template <> -// inline T Scalar::to() const { -// return to##name(); -// } +/** It is possible to eliminate the extra layer of indirection for + * borrows for some types that we control. For examples, see + * intrusive_ptr.h and TensorBody.h. */ - - - - - - - - - - - - -// #undef DEFINE_TO +// Explicitly enable MaybeOwned>, rather than allowing +// MaybeOwned to be used for any type right away. 
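// Conceptual plain-Java sketch of the borrow-vs-own semantics described in the
// comments above. This is illustrative only, not the generated
// MaybeOwned/ExclusivelyOwned API: a borrow stores a bare reference and
// touches no reference count, while only the owning case is responsible for
// releasing the value.

final class MaybeOwnedSketch<T> {
    private final T value;
    private final boolean owned;

    private MaybeOwnedSketch(T value, boolean owned) {
        this.value = value;
        this.owned = owned;
    }

    // Borrow: the caller keeps ownership, so no refcount traffic happens here.
    static <T> MaybeOwnedSketch<T> borrowed(T t) {
        return new MaybeOwnedSketch<>(t, false);
    }

    // Own: this wrapper becomes the sole owner and must release the value.
    static <T> MaybeOwnedSketch<T> owned(T t) {
        return new MaybeOwnedSketch<>(t, true);
    }

    T get() {
        return value;
    }

    boolean mustRelease() {
        return owned;
    }
}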
+// Targeting ../TensorMaybeOwned.java + + +// Targeting ../TensorBaseMaybeOwned.java - // namespace c10 + // namespace c10 + // Parsed from c10/core/SymNodeImpl.h @@ -5625,7 +5288,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace c10 -// Parsed from c10/core/SymBool.h +// Parsed from c10/core/SymFloat.h // #pragma once @@ -5633,15 +5296,18 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// Targeting ../SymBool.java +// #include +// #include +// Targeting ../SymFloat.java +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymFloat s); // namespace c10 -// Parsed from c10/core/SymFloat.h +// Parsed from c10/core/SymBool.h // #pragma once @@ -5649,14 +5315,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include - -// #include -// #include -// Targeting ../SymFloat.java - +// Targeting ../SymBool.java +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymBool s); // namespace c10 @@ -5681,26890 +5344,12884 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10") public static native @ByVal @Name("operator *") SymInt multiply(@Cast("int64_t") long a, @Const @ByRef SymInt b); @Namespace("c10") public static native @ByVal @Name("operator /") SymInt divide(@Cast("int64_t") long a, @Const @ByRef SymInt b); @Namespace("c10") public static native @ByVal @Name("operator %") SymInt mod(@Cast("int64_t") long a, @Const @ByRef SymInt b); - - +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Cast("int64_t") long a, @Const @ByRef SymInt b); +@Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Cast("int64_t") long a, @Const @ByRef SymInt b); @Namespace("c10") public static native @Cast("bool") @Name("operator <") boolean lessThan(@Cast("int64_t") long a, @Const @ByRef SymInt b); @Namespace("c10") public static native @Cast("bool") @Name("operator <=") boolean lessThanEquals(@Cast("int64_t") long a, @Const @ByRef SymInt b); @Namespace("c10") public static native @Cast("bool") @Name("operator >") boolean greaterThan(@Cast("int64_t") long a, @Const @ByRef SymInt b); @Namespace("c10") public static native @Cast("bool") @Name("operator >=") boolean greaterThanEquals(@Cast("int64_t") long a, @Const @ByRef SymInt b); - +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymInt s); @Namespace("c10") public static native @ByVal @Name("operator -") SymInt subtract(@Const @ByRef SymInt s); // namespace c10 -// Parsed from c10/core/SymIntArrayRef.h +// Parsed from c10/util/TypeCast.h // #pragma once +// #include +// #include +// #include -// #include -// #include -// #include -// #include +// #include -@Namespace("c10") public static native @ByVal @Cast("c10::ArrayRef*") LongArrayRef asIntArrayRefUnchecked(@ByVal SymIntRef ar); +// #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") +// #endif +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif -@Namespace("c10") public static native @ByVal LongArrayRefOptional asIntArrayRefSlowOpt( - @ByVal SymIntRef ar); +// Note: deliberately ignores undefined behavior, 
consistent with NumPy. +// PyTorch's type conversions can cause a variety of undefined behavior, +// including float to integral overflow and signed to unsigned integer overflow. +// Some of this undefined behavior is addressed below. +// Partial template instantiation for casting to uint8. +// Note: Converting from negative float values to unsigned integer types is +// undefined behavior in C++, and current CPU and GPU compilers exhibit +// divergent behavior. Casting from negative float values to signed +// integer types and then to unsigned integer types is not undefined, +// however, so this cast improves the consistency of type conversions +// to uint8 across compilers. +// Further note: Type conversions across compilers still have other undefined +// and divergent behavior. +// Define separately to avoid being inlined and prevent code-size bloat +@Namespace("c10") public static native void report_overflow(@Cast("const char*") BytePointer name); +@Namespace("c10") public static native void report_overflow(String name); -// #define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__) + // namespace c10 -// Prefer using a more semantic constructor, like -// fromIntArrayRefKnownNonNegative -@Namespace("c10") public static native @ByVal SymIntRef fromIntArrayRefUnchecked(@ByVal @Cast("c10::ArrayRef*") LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntRef fromIntArrayRefUnchecked(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); +// Trigger tests for D25440771. TODO: Remove this line any time you want. -@Namespace("c10") public static native @ByVal SymIntRef fromIntArrayRefKnownNonNegative(@ByVal @Cast("c10::ArrayRef*") LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntRef fromIntArrayRefKnownNonNegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); -@Namespace("c10") public static native @ByVal SymIntRef fromIntArrayRefSlow(@ByVal @Cast("c10::ArrayRef*") LongArrayRef array_ref); -@Namespace("c10") public static native @ByVal SymIntRef fromIntArrayRefSlow(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
array_ref); +// Parsed from c10/core/Scalar.h - // namespace c10 +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include -// Parsed from c10/core/Allocator.h +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// #pragma once +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif +// Targeting ../Scalar.java -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../DataPtr.java +// define the scalar.to() specializations +// #define DEFINE_TO(T, name) +// template <> +// inline T Scalar::to() const { +// return to##name(); +// } + + + + + + + + + + + + + +// #undef DEFINE_TO + // namespace c10 -// NB: Device is NOT tested for here; a CUDA nullptr is as much a nullptr as a -// CPU nullptr +// Parsed from c10/util/Backtrace.h +// #ifndef C10_UTIL_BACKTRACE_H_ +// #define C10_UTIL_BACKTRACE_H_ +// #include +// #include +// #include -// Targeting ../Allocator.java +// #include +@Namespace("c10") public static native @StdString BytePointer get_backtrace( + @Cast("size_t") long frames_to_skip/*=0*/, + @Cast("size_t") long maximum_number_of_frames/*=64*/, + @Cast("bool") boolean skip_python_frames/*=true*/); +@Namespace("c10") public static native @StdString BytePointer get_backtrace(); + // namespace c10 +// #endif // C10_UTIL_BACKTRACE_H_ -// This context is used to generate DataPtr which have arbitrary -// std::function deleters associated with them. In some user facing -// functions, we give a (user-friendly) interface for constructing -// tensors from external data which take an arbitrary std::function -// deleter. Grep for InefficientStdFunctionContext to find these -// occurrences. -// -// This context is inefficient because we have to do a dynamic -// allocation InefficientStdFunctionContext, on top of the dynamic -// allocation which is implied by std::function itself. +// Parsed from c10/util/IdWrapper.h -/** Set the allocator for DeviceType {@code t}. The passed in allocator pointer is - * expected to have static lifetime; this function does NOT take ownership - * of the raw pointer. (The reason for this is to prevent existing pointers - * to an allocator of a particular device from being invalidated when - * SetAllocator is called.) +// #pragma once + +// #include +// #include +// #include +// #include + +/** + * This template simplifies generation of simple classes that wrap an id + * in a typesafe way. Namely, you can use it to create a very lightweight + * type that only offers equality comparators and hashing. Example: * - * Also note that this is not thread-safe, and we assume this function will - * only be called during initialization. + * struct MyIdType final : IdWrapper { + * constexpr explicit MyIdType(uint32_t id): IdWrapper(id) {} + * }; * - * The 'priority' flag is introduced when we want to overwrite the default - * allocator, since the allocators are set statically. The default priority - * is 0, which means the lowest. Only higher or equal priority can overwrite - * existing ones. + * Then in the global top level namespace: + * + * C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType); + * + * That's it - equality operators and hash functions are automatically defined + * for you, given the underlying type supports it. 
*/ -@Namespace("c10") public static native void SetAllocator(DeviceType t, Allocator alloc, @Cast("uint8_t") byte priority/*=0*/); -@Namespace("c10") public static native void SetAllocator(DeviceType t, Allocator alloc); -@Namespace("c10") public static native void SetAllocator(@Cast("c10::DeviceType") byte t, Allocator alloc, @Cast("uint8_t") byte priority/*=0*/); -@Namespace("c10") public static native void SetAllocator(@Cast("c10::DeviceType") byte t, Allocator alloc); -@Namespace("c10") public static native Allocator GetAllocator(DeviceType t); -@Namespace("c10") public static native Allocator GetAllocator(@Cast("c10::DeviceType") byte t); -// #define REGISTER_ALLOCATOR(t, f) -// namespace { -// static c10::AllocatorRegisterer g_allocator_d(f); -// } -// Targeting ../MemoryReportingInfoBase.java + // namespace c10 +// #define C10_DEFINE_HASH_FOR_IDWRAPPER(ClassName) +// namespace std { +// template <> +// struct hash { +// size_t operator()(ClassName x) const { +// return hash_value(x); +// } +// }; +// } -@Namespace("c10") public static native @Cast("bool") boolean memoryProfilingEnabled(); -@Namespace("c10") public static native void reportMemoryUsageToProfiler( - Pointer ptr, - @Cast("int64_t") long alloc_size, - @Cast("size_t") long total_allocated, - @Cast("size_t") long total_reserved, - @ByVal Device device); +// Parsed from c10/util/Type.h -@Namespace("c10") public static native void reportOutOfMemoryToProfiler( - @Cast("int64_t") long alloc_size, - @Cast("size_t") long total_allocated, - @Cast("size_t") long total_reserved, - @ByVal Device device); +// #ifndef C10_UTIL_TYPE_H_ +// #define C10_UTIL_TYPE_H_ - // namespace c10 +// #include +// #include +// #include +// #include -// Parsed from c10/core/DefaultDtype.h +/** Utility to demangle a C++ symbol name. */ +@Namespace("c10") public static native @StdString BytePointer demangle(@Cast("const char*") BytePointer name); +@Namespace("c10") public static native @StdString String demangle(String name); -// #pragma once +/** Returns the printable name of the type. */ -// #include -// #include - // namespace caffe2 -@Namespace("c10") public static native void set_default_dtype(@ByVal TypeMeta dtype); -@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_dtype(); -@Namespace("c10") public static native ScalarType get_default_dtype_as_scalartype(); -@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_complex_dtype(); // namespace c10 +// #endif // C10_UTIL_TYPE_H_ + -// Parsed from c10/core/StorageImpl.h +// Parsed from c10/util/ConstexprCrc.h // #pragma once -// #include -// #include -// #include +// #include +// #include +// #include +// #include +@Namespace("c10::util::detail") @MemberGetter public static native @Cast("const uint64_t") long crc64_table(int i); +@Namespace("c10::util::detail") @MemberGetter public static native @Cast("const uint64_t*") LongPointer crc64_table(); -// #include -// Targeting ../StorageImpl.java +@Namespace("c10::util::detail") public static native @Cast("const uint64_t") long crc64impl(@Cast("uint64_t") long accumulator, @Cast("const char*") BytePointer data, @Cast("size_t") long size); +@Namespace("c10::util::detail") public static native @Cast("const uint64_t") long crc64impl(@Cast("uint64_t") long accumulator, String data, @Cast("size_t") long size); +// Targeting ../crc64_t.java - // namespace c10 -// Parsed from c10/core/Storage.h +// CRC64 with Jones coefficients and an init value of 0. 
+@Namespace("c10::util") public static native @Const @ByVal crc64_t crc64(@Cast("const char*") BytePointer str, @Cast("size_t") long size); +@Namespace("c10::util") public static native @Const @ByVal crc64_t crc64(String str, @Cast("size_t") long size); -// #pragma once +@Namespace("c10::util") public static native @Const @ByVal crc64_t crc64(@ByVal @Cast("c10::string_view*") Pointer str); + // namespace util + // namespace c10 -// #include -// Targeting ../Storage.java - - - - // namespace c10 +// Allow usage of crc64_t in std::unordered_set + -// Parsed from c10/core/TensorOptions.h +// Parsed from c10/util/TypeIndex.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include // #include -// #include +// #include +// #include +// #include +// #include +// #include -// #include -// #include -// #include +// TODO Make it work for more compilers -@Namespace("c10") public static native DispatchKey computeDispatchKey( - @ByVal ScalarTypeOptional dtype, - @ByVal LayoutOptional layout, - @ByVal DeviceOptional device); +// Intel compiler works +// #if defined(__INTEL_COMPILER) +public static final int C10_TYPENAME_SUPPORTS_CONSTEXPR = 0; +// #define C10_TYPENAME_CONSTEXPR -@Namespace("c10") public static native ScalarType dtype_or_default(@ByVal ScalarTypeOptional dtype); +// Clang works +// #elif defined(__clang__) -@Namespace("c10") public static native @ByVal TypeMeta dtype_or_default( - @ByVal TypeMetaOptional dtype); +// except for NVCC +// #if defined(__CUDACC__) +// #define C10_TYPENAME_CONSTEXPR +// #else +// #define C10_TYPENAME_CONSTEXPR constexpr +// #endif -@Namespace("c10") public static native Layout layout_or_default(@ByVal LayoutOptional layout); +// Windows works +// #elif defined(_MSC_VER) +// #elif defined(__GNUC__) +// #else +// #define C10_TYPENAME_CONSTEXPR constexpr +// Targeting ../type_index.java -@Namespace("c10") public static native @ByVal Device device_or_default(@ByVal DeviceOptional device); -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -@Namespace("c10") public static native @Cast("bool") boolean pinned_memory_or_default(@ByVal BoolOptional pinned_memory); -// Targeting ../TensorOptions.java +// #if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && +// __GNUC__ < 5 +// Getting __PRETTY_FUNCTION__ at compile time only works with GCC >= 5 +// #error "You're running a too old version of GCC. We need GCC 5 or later." +// #endif +// #if defined(__clang__) && __clang_major__ < 4 +// Getting __PRETTY_FUNCTION__ at compile time only works with Clang >= 4 +// #error "You're running a too old version of Clang. We need Clang 4 or later." +// #endif +@Namespace("c10::util::detail") public static native @ByVal @Cast("const c10::string_view*") Pointer extract( + @ByVal @Cast("c10::string_view*") Pointer prefix, + @ByVal @Cast("c10::string_view*") Pointer suffix, + @ByVal @Cast("c10::string_view*") Pointer str); -// We should aspire to fit in one machine-size word; but a size greater than two -// words is too much. (We are doing terribly on 32-bit archs, where we require -// three machine size words to store tensor options. Eek!) +// #if !defined(__CUDA_ARCH__) +// #endif -/** Convenience function that returns a {@code TensorOptions} object with the {@code dtype} - * set to the given one. 
*/ -@Namespace("c10") public static native @ByVal TensorOptions dtype(@ByVal TypeMeta dtype); + // namespace detail -// legacy function to support ScalarType -@Namespace("c10") public static native @ByVal TensorOptions dtype(ScalarType dtype); +@Namespace("c10::util") public static native @Const @ByVal @Name("get_type_index") type_index get_type_index_string(); -/** Convenience function that returns a {@code TensorOptions} object with the {@code layout} - * set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions layout(Layout layout); -@Namespace("c10") public static native @ByVal TensorOptions layout(@Cast("c10::Layout") byte layout); +// #if !defined(TORCH_PEDANTIC) +// Use precomputed hashsum for std::string +// Needed to workaround ambiguity in class name resolution +// into __PRETTY_FUNCION__ when abovementioned class is defined in inlined +// namespace. In multi-ABI C++ library, `std::string` is an alias to +// `std::__cxx11::basic_string` which depending on compiler flags can be +// resolved to `basic_string` either in `std` namespace or in +// `std::__cxx11` one (`__cxx11` is an inline namespace) +// #endif + // namespace util + // namespace c10 + -/** Convenience function that returns a {@code TensorOptions} object with the {@code device} - * set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions device(@ByVal Device device); -/** Convenience function that returns a {@code TensorOptions} object with the - * {@code device} set to CUDA and the {@code device_index} set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions device_index(short device_index); +// Parsed from c10/util/flat_hash_map.h -/** Convenience function that returns a {@code TensorOptions} object with the - * {@code requires_grad} set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions requires_grad(@Cast("bool") boolean requires_grad/*=true*/); +// Taken from +// https://github.com/skarupke/flat_hash_map/blob/2c4687431f978f02a3780e24b8b701d22aa32d9c/flat_hash_map.hpp +// with fixes applied: +// - https://github.com/skarupke/flat_hash_map/pull/25 +// - https://github.com/skarupke/flat_hash_map/pull/26 +// - replace size_t with uint64_t to fix it for 32bit +// - add "GCC diagnostic" pragma to ignore -Wshadow +// - make sherwood_v3_table::convertible_to_iterator public because GCC5 seems +// to have issues with it otherwise +// - fix compiler warnings in operator templated_iterator -/** Convenience function that returns a {@code TensorOptions} object with the - * {@code memory_format} set to the given one. */ -@Namespace("c10") public static native @ByVal TensorOptions memory_format(MemoryFormat memory_format); -@Namespace("c10") public static native @ByVal TensorOptions memory_format(@Cast("c10::MemoryFormat") byte memory_format); +// Copyright Malte Skarupke 2017. +// Distributed under the Boost Software License, Version 1.0. +// (See http://www.boost.org/LICENSE_1_0.txt) + +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif -@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByVal TensorOptions options); +// #ifdef _MSC_VER +// #define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__ +// #else +// #define SKA_NOINLINE(...) 
__VA_ARGS__ __attribute__((noinline)) +// #endif +@Namespace("ska::detailv3") @MemberGetter public static native byte min_lookups(); +public static final byte min_lookups = min_lookups(); -// This is intended to be a centralized location by which we can determine -// what an appropriate DispatchKey for a tensor is. +@Namespace("ska::detailv3") public static native byte log2(@Cast("uint64_t") long value); -@Namespace("c10") public static native Layout dispatchKeyToLayout(DispatchKey dispatch_key); -@Namespace("c10") public static native @Cast("c10::Layout") byte dispatchKeyToLayout(@Cast("c10::DispatchKey") short dispatch_key); +@Namespace("ska::detailv3") public static native @Cast("uint64_t") long next_power_of_two(@Cast("uint64_t") long i); -@Namespace("c10") public static native DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key); -@Namespace("c10") public static native @Cast("c10::DeviceType") byte dispatchKeyToDeviceType(@Cast("c10::DispatchKey") short dispatch_key); +// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t +// (it takes CWG1558 into account and also works for older compilers) + // namespace detailv3 -@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key); -@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(@Cast("c10::DispatchKey") short dispatch_key); -@Namespace("c10::detail") public static native @Cast("bool") boolean backend_supports_empty_operator(@Const @ByVal TensorOptions options); + // end namespace ska - // namespace detail - // namespace c10 +// Parsed from c10/util/irange.h -// Parsed from c10/core/TensorImpl.h +// Copyright 2004-present Facebook. All Rights Reserved. // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include // #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include // #include -// #include +// #include // #include -// #include -// #include -// #include +// #include -// A global boolean variable to control whether we free memory when a Tensor -// is shrunk to a smaller size. As a result, a Tensor is always going to -// keep the memory allocated for its maximum capacity reshaped to so far. -// -// This parameter is respected "upper-case" methods which call Resize() -// (e.g., CopyFrom, ResizeLike); it is NOT respected by Tensor::resize_ -// or ShrinkTo, both of which guarantee to never to free memory. + // namespace detail +/** Creates an integer range for the half-open interval [begin, end) + * If end<=begin, then the range is empty. + * The range has the type of the {@code end} integer; {@code begin} integer is + * cast to this type. */ -// Since we can have high variance in blob memory allocated across different -// inputs in the same run, we will shrink the blob only if the memory gain -// is larger than this flag in bytes. This only applies to functions which -// respect caffe2_keep_on_shrink. 
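// Hedged illustration, not part of the generated file: the irange helpers
// documented above and just below build a half-open integer interval
// [begin, end) that is empty when end <= begin. irange itself is a C++
// template, so the closest Java analogue, shown purely for comparison, is
// java.util.stream.IntStream.range, which has the same half-open,
// empty-when-end<=begin behavior.

import java.util.stream.IntStream;

public class IrangeAnalogue {
    public static void main(String[] args) {
        IntStream.range(2, 5).forEach(System.out::println); // prints 2, 3, 4
        IntStream.range(5, 2).forEach(System.out::println); // empty range: prints nothing
    }
}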
+/** Creates an integer range for the half-open interval [0, end) + * If end<=begin, then the range is empty */ + + // namespace c10 -// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// Parsed from c10/util/typeid.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #ifdef __GXX_RTTI +// #include // #endif - // namespace at - // namespace c10 -/** - * A utility function to convert vector to vector. - */ -@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector ToVectorint64_t(@Cast("const c10::ArrayRef*") @ByRef IntArrayRef src); +// #include -/** - * Return product of all dimensions starting from k +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include + +/* + * TypeIdentifier is a small type containing an id. + * Types must be registered using CAFFE_DECLARE_KNOWN_TYPE() (in their header) + * and CAFFE_DEFINE_KNOWN_TYPE() (in their .cpp file) for them to have a type + * id. If a type is registered, you can also create an object containing meta + * data like constructor, destructor, stringified name, ... about the type by + * calling TypeMeta::Make. This returns a TypeMeta() object, which is + * basically just a pointer to the type information, so it's cheap to pass + * around. */ -@Namespace("c10") public static native @Cast("int64_t") long size_from_dim_(int k, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("c10") public static native @Cast("int64_t") long size_from_dim_(int k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// Product of all dims up to k (not including dims[k]) -@Namespace("c10") public static native @Cast("int64_t") long size_to_dim_(int k, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("c10") public static native @Cast("int64_t") long size_to_dim_(int k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// TODO: This file is still in the caffe2 namespace, despite living +// in the ATen directory. This is because the macro +// CAFFE_KNOWN_TYPE (and CAFFE_DECLARE_KNOWN_TYPE) defines a template +// specialization, which relies +// on the namespace of TypeMeta matching the namespace where the macro is +// called. This requires us to fix all of the call-sites, which I want to do +// later. So the namespace is not fixed at the moment. -// Product of all dims between k and l (not including dims[k] and dims[l]) -@Namespace("c10") public static native @Cast("int64_t") long size_between_dim_(int k, int l, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("c10") public static native @Cast("int64_t") long size_between_dim_(int k, int l, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// Make at::Half a fundamental type. 
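TypeMeta::Make is a C++ template, so it has no direct Java mapping here; from these bindings the practical way to obtain a TypeMeta handle is the ScalarType bridge this patch parses further down (c10/core/ScalarTypeToTypeMeta.h), together with the operator== / operator!= overloads it exposes as equals/notEquals. A minimal sketch of the "cheap to pass around" point, assuming those generated signatures:

import org.bytedeco.pytorch.TypeMeta;
import org.bytedeco.pytorch.global.torch;

public class TypeMetaSketch {
    public static void main(String[] args) {
        // A TypeMeta is essentially a small index into the registered-type
        // table, so handles are compared by value, not by identity.
        TypeMeta f1 = torch.scalarTypeToTypeMeta(torch.ScalarType.Float);
        TypeMeta f2 = torch.scalarTypeToTypeMeta(torch.ScalarType.Float);
        TypeMeta d = torch.scalarTypeToTypeMeta(torch.ScalarType.Double);
        System.out.println(torch.equals(f1, f2));   // true: same registered type
        System.out.println(torch.notEquals(f1, d)); // true: Float != Double
    }
}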
+ // namespace guts + // namespace c10 +// Targeting ../TypeIdentifier.java -// Wrap around axis_index if it is negative, s.t., -1 is the last dim -@Namespace("c10") public static native int canonical_axis_index_(int axis_index, int ndims); -// Targeting ../PlacementDtor.java -// Targeting ../PlacementDeleteContext.java +// Allow usage in std::map / std::set +// TODO Disallow this and rather use std::unordered_map/set everywhere +@Namespace("caffe2") public static native @Cast("const bool") @Name("operator <") boolean lessThan(@ByVal TypeIdentifier lhs, @ByVal TypeIdentifier rhs); +@Namespace("caffe2") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + @ByVal TypeIdentifier typeId); -// Targeting ../AutogradMetaInterface.java + // namespace caffe2 + +// Targeting ../TypeMetaData.java -// Targeting ../AutogradMetaFactory.java +// Mechanism for throwing errors which can't be prevented at compile time +// due to type erasure. E.g. somebody calling TypeMeta::copy() for +// non-copyable type. Right now just throws exception but is implemented +// in .cpp to manage dependencies +@Namespace("caffe2::detail") public static native void _ThrowRuntimeTypeLogicError(@StdString BytePointer msg); +@Namespace("caffe2::detail") public static native void _ThrowRuntimeTypeLogicError(@StdString String msg); -@Namespace("c10::impl") public static native void SetAutogradMetaFactory(AutogradMetaFactory factory); -@Namespace("c10::impl") public static native AutogradMetaFactory GetAutogradMetaFactory(); -// Targeting ../AutogradMetaFactoryRegisterer.java +/** + * Placement new function for the type. + */ +/** + * Typed copy function for classes. + */ +/** + * A placeholder function for types that do not allow assignment. + */ +/** + * Destructor for non-fundamental types. + */ -// Targeting ../NamedTensorMetaInterface.java + // namespace detail +// +// note: this is outside TypeMeta bc gcc seems to have trouble +// with scalarTypeItemSizes as a constexpr static member used by +// a public inline instance method +// +// item sizes for TypeMeta::itemsize() fast path +@Namespace("caffe2") @MemberGetter public static native @Cast("const uint8_t") byte scalarTypeItemSizes(int i); +@Namespace("caffe2") @MemberGetter public static native @Cast("const uint8_t*") BytePointer scalarTypeItemSizes(); +// Targeting ../TypeMeta.java -// For ease of copy pasting -// #if 0 -// #endif -// Targeting ../VariableVersion.java +// specializations of TypeMeta::_typeMetaData for ScalarType types -// Forward declaration of TensorImpl needed for forward declaration of -// C10_TensorImpl_Size_Check_Dummy_Class +// #define DEFINE_SCALAR_METADATA_INSTANCE(T, name) +// template <> +// constexpr uint16_t TypeMeta::_typeMetaData() noexcept { +// return static_cast(ScalarType::name); +// } + /* 0 */ + /* 1 */ + /* 2 */ + /* 3 */ + /* 4 */ + /* 5 */ + /* 6 */ + /* 7 */ + /* 8 */ + /* 9 */ + /* 10 */ + /* 11 */ + /* 12 */ + /* 13 */ + /* 14 */ + /* 15 */ + /* 16 */ + /* 17 */ +// #undef DEFINE_SCALAR_METADATA_INSTANCE -// Forward declaration needed because TensorImpl needs to be friends with -// C10_TensorImpl_Size_Check_Dummy_Class in order to check the size -// of its private fields. -/** - * NOTE: Some TensorImpl methods are small and not overridden in the - * PyTorch codebase itself, but may theoretically need to be - * overridden by third-party TensorImpl subclasses. 
This macro allows - * users that need maximum performance and don't need these extension - * points to disable them with a build-time flag. (In particular, - * XLA's XLATensorImpl currently overrides these methods, so we can't - * enable this flag by default.) - */ -// #ifdef C10_DISABLE_TENSORIMPL_EXTENSIBILITY -// #define TENSORIMPL_MAYBE_VIRTUAL -// #else -// #define TENSORIMPL_MAYBE_VIRTUAL virtual -// Targeting ../TensorImpl.java -// Note [TensorImpl size constraints] -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Changed the size of TensorImpl? If the size went down, good for -// you! Adjust the documentation below and the expected size. -// Did it go up? Read on... -// -// Struct size matters. In some production systems at Facebook, we have -// 400M live tensors during a training run. Do the math: every 64-bit -// word you add to Tensor is an extra 3.2 gigabytes in RAM. -// -// If you are a Facebook employee, you can check if the run in question -// has tipped you over the point using the command here: -// https://fburl.com/q5enpv98 -// -// For reference, we OOMed at 160 bytes (20 words) per TensorImpl. -// This is not counting overhead from strides out-of-line allocation and -// StorageImpl space and this is from before we inlined sizes and strides -// directly into TensorImpl as SmallVectors. -// -// Our memory usage on 32-bit systems is suboptimal, but we're not checking -// for it at the moment (to help avoid rage inducing cycles when the -// 32-bit number is wrong). -// -// Current breakdown: -// -// vtable pointer -// strong refcount TODO: pack these into one word -// weak refcount -// storage pointer -// autograd metadata pointer -// named tensor metadata pointer -// version counter pointer -// PyObjectSlot -// SizesAndStrides size/pointer -// SizesAndStrides sizes (pre-allocated 0) -// SizesAndStrides sizes (pre-allocated 1) -// SizesAndStrides sizes (pre-allocated 2) -// SizesAndStrides sizes (pre-allocated 3) -// SizesAndStrides sizes (pre-allocated 4) -// SizesAndStrides strides (pre-allocated 0) -// SizesAndStrides strides (pre-allocated 1) -// SizesAndStrides strides (pre-allocated 2) -// SizesAndStrides strides (pre-allocated 3) -// SizesAndStrides strides (pre-allocated 4) -// storage offset -// numel -// data type, device, is_contiguous, storage_access_should_throw_, bitfields -// DispatchKeySet -// +@Namespace("caffe2") public static native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByVal TypeMeta lhs, @Const @ByVal TypeMeta rhs); +@Namespace("caffe2") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@Const @ByVal TypeMeta lhs, @Const @ByVal TypeMeta rhs); -// Various preprocessor macros we use to check that the -// TensorImpl size hasn't changed unexpectedly. We undef -// these later. -// #ifndef __NVCC__ -public static final int C10_NVCC = 0; -// #else -// #endif +@Namespace("caffe2") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + @ByVal TypeMeta typeMeta); -// #ifndef __CUDA_VER_MAJOR__ -public static final int C10_CUDA_VERSION_MAJOR = 0; +/** + * Register unique id for a type so it can be used in TypeMeta context, e.g. be + * used as a type for Blob or for Tensor elements. + * + * CAFFE_KNOWN_TYPE is deprecated; prefer CAFFE_DECLARE_KNOWN_TYPE and + * CAFFE_DEFINE_KNOWN_TYPE. 
+ * + * CAFFE_KNOWN_TYPE does explicit instantiation of TypeIdentifier::Get + * template function and thus needs to be put in a single translation unit (.cpp + * file) for a given type T. Other translation units that use type T as a type + * of the caffe2::Blob or element type of caffe2::Tensor need to depend on the + * translation unit that contains CAFFE_KNOWN_TYPE declaration via regular + * linkage dependencies. + * + * NOTE: the macro needs to be invoked in ::caffe2 namespace + */ +// Implementation note: in MSVC, we will need to prepend the C10_API +// keyword in order to get things compiled properly. in Linux, gcc seems to +// create attribute ignored error for explicit template instantiations, see +// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0537r0.html +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51930 +// and as a result, we define these two macros slightly differently. +// #if defined(_MSC_VER) || defined(__clang__) +// #define EXPORT_IF_NOT_GCC C10_EXPORT // #else +// #define EXPORT_IF_NOT_GCC // #endif -// #ifndef CUDA_VERSION -public static final int C10_CUDA_VERSION = 0; -// #else -// #endif +// CAFFE_KNOWN_TYPE is deprecated! Use CAFFE_DECLARE_KNOWN_TYPE and +// CAFFE_DEFINE_KNOWN_TYPE instead. +// #define CAFFE_KNOWN_TYPE(T) +// template uint16_t TypeMeta::addTypeMetaData(); +// template <> +// EXPORT_IF_NOT_GCC uint16_t TypeMeta::_typeMetaData() noexcept { +// static const uint16_t index = addTypeMetaData(); +// return index; +// } -// #ifndef __clang_major__ -public static final int C10_CLANG_MAJOR_VERSION = 0; -// #else -// #endif +// #define CAFFE_DEFINE_KNOWN_TYPE(T) +// template uint16_t TypeMeta::addTypeMetaData(); -// #ifndef __GNUC__ -public static final int C10_GCC_VERSION = 0; +// Unlike CAFFE_KNOWN_TYPE, CAFFE_DECLARE_KNOWN_TYPE avoids a function +// call to access _typeMetaData in the common case. +// #ifdef __CUDACC__ +// nvcc needs its own specialization that doesn't use +// C10_ALWAYS_INLINE so that it doesn't need to see a definition for +// _addTypeMeta. See NOTE [ TypeIdentifier::Get nvcc/clang discrepancy +// ]. +// #define CAFFE_DECLARE_KNOWN_TYPE(T) +// extern template uint16_t TypeMeta::addTypeMetaData(); +// template <> +// EXPORT_IF_NOT_GCC inline uint16_t TypeMeta::_typeMetaData() noexcept { +// static const uint16_t index = addTypeMetaData(); +// return index; +// } // #else +// #define CAFFE_DECLARE_KNOWN_TYPE(T) +// extern template uint16_t TypeMeta::addTypeMetaData(); +// template <> +// EXPORT_IF_NOT_GCC C10_ALWAYS_INLINE uint16_t +// TypeMeta::_typeMetaData() noexcept { +// static const uint16_t index = addTypeMetaData(); +// return index; +// } // #endif -// #ifndef __GNUC_MINOR__ -public static final int C10_GCC_VERSION_MINOR = 0; -// #else -// #endif +// #define CAFFE_KNOWN_TYPE_NOEXPORT(T) +// template <> +// uint16_t TypeMeta::_typeMetaData() noexcept { +// static const uint16_t index = addTypeMetaData(); +// return index; +// } -// We use a templatized class to both contain the logic of checking the sizes -// as well as to provide compile-time information that might be useful in -// figuring out why sizes may have changed. -// All the compile time information is given by the template fields that are -// always printed by the compiler when the static_assert fails. -// We use a class to encapsulate size-checking logic with -// templates to capture sizes and flags. We call this within -// a static assert to prove there is no run-time behaviour. 
-// Since the methods we call return either true or fail their -// own static_asserts, we should never see the error messages -// below. We have to provide it though for c++ <17. + -// Clean up after ourselves -// #undef C10_NVCC -// #undef C10_CUDA_VERSION_MAJOR -// #undef C10_CUDA_VERSION -// #undef C10_CLANG_MAJOR_VERSION -// #undef C10_GCC_VERSION -// #undef C10_GCC_VERSION_MINOR + - // namespace c10 + + + -// Parsed from c10/core/UndefinedTensorImpl.h + -// #pragma once + -// #include -// Targeting ../UndefinedTensorImpl.java + + + - // namespace c10 + +// For some of the compilers, long is defined separately from int32_t and +// int64_t. As a result we will need to actually define them separately. +// It is recommended that one does NOT use long - use int32_t and int64_t +// explicitly. Explicit long type annotation may go away in the future. +// details: This hack works by defining a _guard_long_unique type, which is +// long iff the compiler has a separate long type and is a dummy type otherwise. +// we then allocate a type id to that _guard_long_unique. If the compiler has a +// separate long type, this allocates a type id for long. Otherwise, it +// allocates a type id for the dummy type, which doesn't matter. + // namespace detail -// Parsed from c10/core/WrapDimMinimal.h -// #pragma once + -// #include -// #include -// This template can only be specialized at int64_t and c10::SymInt; -// you'll get linker errors otherwise - // namespace detail + -@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( - @Cast("int64_t") long dim, - @Cast("int64_t") long dim_post_expr, - @Cast("bool") boolean wrap_scalar/*=true*/); -@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( - @Cast("int64_t") long dim, - @Cast("int64_t") long dim_post_expr); -@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( - @ByVal SymInt dim, - @ByVal SymInt dim_post_expr, - @Cast("bool") boolean wrap_scalar/*=true*/); -@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( - @ByVal SymInt dim, - @ByVal SymInt dim_post_expr); + - // namespace c10 + + // namespace caffe2 -// Parsed from ATen/core/symbol.h -// #pragma once -// #include -// #include -// #include // For std::hash -// #include +// Parsed from c10/core/ScalarTypeToTypeMeta.h -// 'prim' symbols are synthetic operators that occur only in the IR -// and don't have corresponding implementations in ATen. +// #pragma once -// 'onnx' symbols correspond to ONNX operators. Their semantics -// are defined in https://github.com/onnx/onnx/blob/master/docs/Operators.md -// The particular version we are targeting is specified by '_onnx_opset_version' -// in torch.onnx.symbolic_helper -// -// In general, most ONNX operators won't get an entry here, because they -// are handled from the Python end. However, you may occasionally need -// to intern an ONNX symbol here so that you can conveniently write an -// optimization on ONNX operations. +// #include +// #include +// #include -// 'attr' symbols are attribute keys. They are shared between both ONNX and ATen -// operators (you disambiguate their meaning by looking at the operator itself). -// In general, you only need to define attribute keys that are used by -// onnx or prim; ATen attributes are automatically generated in FORALL_ATTR_BASE_SYMBOLS. 
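The WrapDimMinimal hunk above removes the maybe_wrap_dim overloads from this location; as with the other moved declarations, this reads as a relocation by the refactor rather than a removal of the c10 function. A sketch of the semantics those declarations carried, against the pre-refactor bindings:

import org.bytedeco.pytorch.global.torch;

public class WrapDimSketch {
    public static void main(String[] args) {
        // maybe_wrap_dim maps a possibly negative dim index into [0, ndim):
        // negative dims count from the end, so -1 is the last dimension.
        System.out.println(torch.maybe_wrap_dim(-1, 4)); // 3
        System.out.println(torch.maybe_wrap_dim(0, 4));  // 0, already in range
    }
}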
+// these just expose TypeMeta/ScalarType bridge functions in c10 +// TODO move to typeid.h (or codemod away) when TypeMeta et al +// are moved from caffe2 to c10 (see note at top of typeid.h) -// Note [Symbol allocation] -// ~~~~~~~~~~~~~~~~~~~~~~~~ -// -// 1. Symbol namespace is split up into namespaces. -// -// 2. The intended access pattern for built-in symbols is onnx::MatMul -// in the c10 namespace (this is a Symbol). -// +/** + * convert ScalarType enum values to TypeMeta handles + */ +@Namespace("c10") public static native @ByVal TypeMeta scalarTypeToTypeMeta(ScalarType scalar_type); -// Built-in constant definition strategy: -// - Enum is the most convenient way to generate a contiguous sequence -// of numbers for an identifier. -// - However, an enum gives you a fresh type. We want onnx::MatMul to -// be type Symbol, not some random enum type! -// - Therefore, after using enums to generate the sequence of integers, -// we then declare constexpr Symbols to get everything the actual Symbol -// type we want. Symbols must be constexpr to be valid to be "case"ed on. +/** + * convert TypeMeta handles to ScalarType enum values + */ +@Namespace("c10") public static native ScalarType typeMetaToScalarType(@ByVal TypeMeta dtype); +/** + * typeMetaToScalarType(), lifted to optional + */ +@Namespace("c10") public static native @ByVal ScalarTypeOptional optTypeMetaToScalarType( + @ByVal TypeMetaOptional type_meta); -// Targeting ../Symbol.java +/** + * convenience: equality across TypeMeta/ScalarType conversion + */ +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(ScalarType t, @ByVal TypeMeta m); +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@ByVal TypeMeta m, ScalarType t); +@Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(ScalarType t, @ByVal TypeMeta m); +@Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@ByVal TypeMeta m, ScalarType t); + // namespace c10 +// Parsed from c10/util/ThreadLocalDebugInfo.h +// #pragma once +// #include +// #include +// #include +@Namespace("c10") public enum DebugInfoKind { + PRODUCER_INFO((byte)(0)), + MOBILE_RUNTIME_INFO((byte)(1)), + PROFILER_STATE((byte)(2)), + INFERENCE_CONTEXT((byte)(3)), // for inference usage + PARAM_COMMS_INFO((byte)(4)), + TEST_INFO((byte)(5)), // used only in tests + TEST_INFO_2((byte)(6));// used only in tests + public final byte value; + private DebugInfoKind(byte v) { this.value = v; } + private DebugInfoKind(DebugInfoKind e) { this.value = e.value; } + public DebugInfoKind intern() { for (DebugInfoKind e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../DebugInfoBase.java +// Targeting ../ThreadLocalDebugInfo.java -// Targeting ../SymbolHash.java +// Targeting ../DebugInfoGuard.java + // namespace c10 -// Parsed from ATen/core/aten_interned_strings.h +// Parsed from c10/util/UniqueVoidPtr.h // #pragma once +// #include -// @generated by torchgen/gen.py from aten_interned_strings.h - -// #if defined(TORCH_ASSERT_NO_OPERATORS) || defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) -// #error This change adds a dependency on native_functions.yaml, -// meaning the file will need to be re-compiled every time an operator -// is changed or added. Consider if including for -// the c10::Symbol class would be sufficient, or if your change would be -// better placed in another file. 
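The newly parsed c10/core/ScalarTypeToTypeMeta.h above gives Java both directions of the ScalarType/TypeMeta bridge plus the mixed-type comparison operators. A small round-trip sketch, assuming the declarations exactly as generated above:

import org.bytedeco.pytorch.TypeMeta;
import org.bytedeco.pytorch.global.torch;

public class ScalarTypeBridgeSketch {
    public static void main(String[] args) {
        TypeMeta meta = torch.scalarTypeToTypeMeta(torch.ScalarType.Half);
        // typeMetaToScalarType inverts the conversion ...
        torch.ScalarType st = torch.typeMetaToScalarType(meta);
        System.out.println(st);                     // Half
        // ... and the mixed equals overload compares across the bridge.
        System.out.println(torch.equals(st, meta)); // true
    }
}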
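The DebugInfoKind hunk above also illustrates the preset's standard mapping for C++ enums: each Java constant carries the native value in a public final field, and intern() recovers the canonical constant from a value-level copy. A usage sketch, assuming the enum is generated exactly as shown:

import org.bytedeco.pytorch.global.torch;

public class DebugInfoKindSketch {
    public static void main(String[] args) {
        torch.DebugInfoKind k = torch.DebugInfoKind.PROFILER_STATE;
        System.out.println(k.value);    // 2: the underlying c10 enum value
        System.out.println(k.intern()); // PROFILER_STATE, via toString()
    }
}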
-// #endif +// #include +// Targeting ../DeleterFnPtr.java -// ATen symbols correspond exactly to operators defined in ATen. Every -// symbol here corresponds exactly to an ATen operation defined in -// native_functions.yaml; attributes are in one-to-one correspondence -// with their ATen name. - -// #define FORALL_ATEN_BASE_SYMBOLS(_) -// _(aten, __and__) -// _(aten, __iand__) -// _(aten, __ilshift__) -// _(aten, __ior__) -// _(aten, __irshift__) -// _(aten, __ixor__) -// _(aten, __lshift__) -// _(aten, __or__) -// _(aten, __rshift__) -// _(aten, __xor__) -// _(aten, _adaptive_avg_pool2d) -// _(aten, _adaptive_avg_pool2d_backward) -// _(aten, _adaptive_avg_pool3d) -// _(aten, _adaptive_avg_pool3d_backward) -// _(aten, _add_batch_dim) -// _(aten, _add_relu) -// _(aten, _add_relu_) -// _(aten, _addmm_activation) -// _(aten, _aminmax) -// _(aten, _amp_foreach_non_finite_check_and_unscale) -// _(aten, _amp_foreach_non_finite_check_and_unscale_) -// _(aten, _amp_update_scale) -// _(aten, _amp_update_scale_) -// _(aten, _assert_async) -// _(aten, _assert_tensor_metadata) -// _(aten, _autocast_to_full_precision) -// _(aten, _autocast_to_reduced_precision) -// _(aten, _backward) -// _(aten, _batch_norm_impl_index) -// _(aten, _batch_norm_impl_index_backward) -// _(aten, _cast_Byte) -// _(aten, _cast_Char) -// _(aten, _cast_Double) -// _(aten, _cast_Float) -// _(aten, _cast_Half) -// _(aten, _cast_Int) -// _(aten, _cast_Long) -// _(aten, _cast_Short) -// _(aten, _cdist_backward) -// _(aten, _cdist_forward) -// _(aten, _cholesky_solve_helper) -// _(aten, _choose_qparams_per_tensor) -// _(aten, _chunk_grad_outputs_efficient_attention) -// _(aten, _coalesce) -// _(aten, _coalesced) -// _(aten, _coalesced_) -// _(aten, _compute_linear_combination) -// _(aten, _conj) -// _(aten, _conj_copy) -// _(aten, _conj_physical) -// _(aten, _conv_depthwise2d) -// _(aten, _convert_indices_from_coo_to_csr) -// _(aten, _convert_indices_from_csr_to_coo) -// _(aten, _convolution) -// _(aten, _convolution_double_backward) -// _(aten, _convolution_mode) -// _(aten, _copy_from) -// _(aten, _copy_from_and_resize) -// _(aten, _ctc_loss) -// _(aten, _ctc_loss_backward) -// _(aten, _cudnn_ctc_loss) -// _(aten, _cudnn_init_dropout_state) -// _(aten, _cudnn_rnn) -// _(aten, _cudnn_rnn_backward) -// _(aten, _cudnn_rnn_flatten_weight) -// _(aten, _cufft_clear_plan_cache) -// _(aten, _cufft_get_plan_cache_max_size) -// _(aten, _cufft_get_plan_cache_size) -// _(aten, _cufft_set_plan_cache_max_size) -// _(aten, _cummax_helper) -// _(aten, _cummin_helper) -// _(aten, _debug_has_internal_overlap) -// _(aten, _dimI) -// _(aten, _dimV) -// _(aten, _dim_arange) -// _(aten, _dirichlet_grad) -// _(aten, _efficient_attention_backward) -// _(aten, _efficient_attention_forward) -// _(aten, _efficientzerotensor) -// _(aten, _embedding_bag) -// _(aten, _embedding_bag_backward) -// _(aten, _embedding_bag_dense_backward) -// _(aten, _embedding_bag_forward_only) -// _(aten, _embedding_bag_per_sample_weights_backward) -// _(aten, _embedding_bag_sparse_backward) -// _(aten, _empty_affine_quantized) -// _(aten, _empty_per_channel_affine_quantized) -// _(aten, _euclidean_dist) -// _(aten, _fake_quantize_learnable_per_channel_affine) -// _(aten, _fake_quantize_learnable_per_channel_affine_backward) -// _(aten, _fake_quantize_learnable_per_tensor_affine) -// _(aten, _fake_quantize_learnable_per_tensor_affine_backward) -// _(aten, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams) -// _(aten, _fft_c2c) -// _(aten, _fft_c2r) -// _(aten, _fft_r2c) 
-// _(aten, _flash_attention_backward) -// _(aten, _flash_attention_forward) -// _(aten, _foobar) -// _(aten, _foreach_abs) -// _(aten, _foreach_abs_) -// _(aten, _foreach_acos) -// _(aten, _foreach_acos_) -// _(aten, _foreach_add) -// _(aten, _foreach_add_) -// _(aten, _foreach_addcdiv) -// _(aten, _foreach_addcdiv_) -// _(aten, _foreach_addcmul) -// _(aten, _foreach_addcmul_) -// _(aten, _foreach_asin) -// _(aten, _foreach_asin_) -// _(aten, _foreach_atan) -// _(aten, _foreach_atan_) -// _(aten, _foreach_ceil) -// _(aten, _foreach_ceil_) -// _(aten, _foreach_clamp_max) -// _(aten, _foreach_clamp_max_) -// _(aten, _foreach_clamp_min) -// _(aten, _foreach_clamp_min_) -// _(aten, _foreach_cos) -// _(aten, _foreach_cos_) -// _(aten, _foreach_cosh) -// _(aten, _foreach_cosh_) -// _(aten, _foreach_div) -// _(aten, _foreach_div_) -// _(aten, _foreach_erf) -// _(aten, _foreach_erf_) -// _(aten, _foreach_erfc) -// _(aten, _foreach_erfc_) -// _(aten, _foreach_exp) -// _(aten, _foreach_exp_) -// _(aten, _foreach_expm1) -// _(aten, _foreach_expm1_) -// _(aten, _foreach_floor) -// _(aten, _foreach_floor_) -// _(aten, _foreach_frac) -// _(aten, _foreach_frac_) -// _(aten, _foreach_lerp) -// _(aten, _foreach_lerp_) -// _(aten, _foreach_lgamma) -// _(aten, _foreach_lgamma_) -// _(aten, _foreach_log) -// _(aten, _foreach_log10) -// _(aten, _foreach_log10_) -// _(aten, _foreach_log1p) -// _(aten, _foreach_log1p_) -// _(aten, _foreach_log2) -// _(aten, _foreach_log2_) -// _(aten, _foreach_log_) -// _(aten, _foreach_maximum) -// _(aten, _foreach_maximum_) -// _(aten, _foreach_minimum) -// _(aten, _foreach_minimum_) -// _(aten, _foreach_mul) -// _(aten, _foreach_mul_) -// _(aten, _foreach_neg) -// _(aten, _foreach_neg_) -// _(aten, _foreach_norm) -// _(aten, _foreach_reciprocal) -// _(aten, _foreach_reciprocal_) -// _(aten, _foreach_round) -// _(aten, _foreach_round_) -// _(aten, _foreach_sigmoid) -// _(aten, _foreach_sigmoid_) -// _(aten, _foreach_sin) -// _(aten, _foreach_sin_) -// _(aten, _foreach_sinh) -// _(aten, _foreach_sinh_) -// _(aten, _foreach_sqrt) -// _(aten, _foreach_sqrt_) -// _(aten, _foreach_sub) -// _(aten, _foreach_sub_) -// _(aten, _foreach_tan) -// _(aten, _foreach_tan_) -// _(aten, _foreach_tanh) -// _(aten, _foreach_tanh_) -// _(aten, _foreach_trunc) -// _(aten, _foreach_trunc_) -// _(aten, _foreach_zero) -// _(aten, _foreach_zero_) -// _(aten, _fused_adam) -// _(aten, _fused_adam_) -// _(aten, _fused_adamw) -// _(aten, _fused_adamw_) -// _(aten, _fused_dropout) -// _(aten, _fused_moving_avg_obs_fq_helper) -// _(aten, _fused_moving_avg_obs_fq_helper_functional) -// _(aten, _fused_sdp_choice) -// _(aten, _fw_primal) -// _(aten, _fw_primal_copy) -// _(aten, _gather_sparse_backward) -// _(aten, _grid_sampler_2d_cpu_fallback) -// _(aten, _grid_sampler_2d_cpu_fallback_backward) -// _(aten, _has_compatible_shallow_copy_type) -// _(aten, _has_same_storage_numel) -// _(aten, _histogramdd_bin_edges) -// _(aten, _histogramdd_from_bin_cts) -// _(aten, _histogramdd_from_bin_tensors) -// _(aten, _index_put_impl) -// _(aten, _index_put_impl_) -// _(aten, _indices) -// _(aten, _indices_copy) -// _(aten, _is_all_true) -// _(aten, _is_any_true) -// _(aten, _is_zerotensor) -// _(aten, _linalg_check_errors) -// _(aten, _linalg_det) -// _(aten, _linalg_eigh) -// _(aten, _linalg_slogdet) -// _(aten, _linalg_solve_ex) -// _(aten, _linalg_svd) -// _(aten, _local_scalar_dense) -// _(aten, _log_softmax) -// _(aten, _log_softmax_backward_data) -// _(aten, _logcumsumexp) -// _(aten, _lstm_mps) -// _(aten, 
_lu_with_info) -// _(aten, _make_dual) -// _(aten, _make_dual_copy) -// _(aten, _make_per_channel_quantized_tensor) -// _(aten, _make_per_tensor_quantized_tensor) -// _(aten, _masked_scale) -// _(aten, _masked_softmax) -// _(aten, _masked_softmax_backward) -// _(aten, _mkldnn_reshape) -// _(aten, _mkldnn_transpose) -// _(aten, _mkldnn_transpose_) -// _(aten, _mps_convolution) -// _(aten, _mps_convolution_transpose) -// _(aten, _native_batch_norm_legit) -// _(aten, _native_batch_norm_legit_functional) -// _(aten, _native_decoder_only_multi_head_attention) -// _(aten, _native_multi_head_attention) -// _(aten, _neg_view) -// _(aten, _neg_view_copy) -// _(aten, _nested_from_padded) -// _(aten, _nested_from_padded_and_nested_example) -// _(aten, _nested_select_backward) -// _(aten, _nested_sum_backward) -// _(aten, _nested_tensor_from_mask) -// _(aten, _nested_tensor_from_mask_left_aligned) -// _(aten, _nested_tensor_from_tensor_list) -// _(aten, _nested_tensor_offsets) -// _(aten, _nested_tensor_size) -// _(aten, _nested_tensor_softmax_with_shape) -// _(aten, _nested_tensor_strides) -// _(aten, _nested_view_from_buffer) -// _(aten, _nested_view_from_buffer_copy) -// _(aten, _new_zeros_with_same_feature_meta) -// _(aten, _nnpack_available) -// _(aten, _nnpack_spatial_convolution) -// _(aten, _nnz) -// _(aten, _pack_padded_sequence) -// _(aten, _pack_padded_sequence_backward) -// _(aten, _pad_circular) -// _(aten, _pad_enum) -// _(aten, _pad_packed_sequence) -// _(aten, _pdist_backward) -// _(aten, _pdist_forward) -// _(aten, _pin_memory) -// _(aten, _prelu_kernel) -// _(aten, _prelu_kernel_backward) -// _(aten, _remove_batch_dim) -// _(aten, _reshape_alias) -// _(aten, _reshape_alias_copy) -// _(aten, _reshape_copy) -// _(aten, _reshape_from_tensor) -// _(aten, _resize_output) -// _(aten, _resize_output_) -// _(aten, _rowwise_prune) -// _(aten, _sample_dirichlet) -// _(aten, _saturate_weight_to_fp16) -// _(aten, _scaled_dot_product_attention) -// _(aten, _scaled_dot_product_attention_math) -// _(aten, _scaled_dot_product_efficient_attention) -// _(aten, _scaled_dot_product_efficient_attention_backward) -// _(aten, _scaled_dot_product_flash_attention) -// _(aten, _scaled_dot_product_flash_attention_backward) -// _(aten, _segment_reduce_backward) -// _(aten, _shape_as_tensor) -// _(aten, _slow_conv2d_backward) -// _(aten, _slow_conv2d_forward) -// _(aten, _sobol_engine_draw) -// _(aten, _sobol_engine_ff) -// _(aten, _sobol_engine_ff_) -// _(aten, _sobol_engine_initialize_state) -// _(aten, _sobol_engine_initialize_state_) -// _(aten, _sobol_engine_scramble) -// _(aten, _sobol_engine_scramble_) -// _(aten, _softmax) -// _(aten, _softmax_backward_data) -// _(aten, _sparse_addmm) -// _(aten, _sparse_broadcast_to) -// _(aten, _sparse_broadcast_to_copy) -// _(aten, _sparse_bsc_tensor_unsafe) -// _(aten, _sparse_bsr_tensor_unsafe) -// _(aten, _sparse_compressed_tensor_unsafe) -// _(aten, _sparse_coo_tensor_unsafe) -// _(aten, _sparse_coo_tensor_with_dims) -// _(aten, _sparse_coo_tensor_with_dims_and_tensors) -// _(aten, _sparse_csc_tensor_unsafe) -// _(aten, _sparse_csr_prod) -// _(aten, _sparse_csr_sum) -// _(aten, _sparse_csr_tensor_unsafe) -// _(aten, _sparse_log_softmax) -// _(aten, _sparse_log_softmax_backward_data) -// _(aten, _sparse_mm) -// _(aten, _sparse_mm_reduce_impl) -// _(aten, _sparse_mm_reduce_impl_backward) -// _(aten, _sparse_softmax) -// _(aten, _sparse_softmax_backward_data) -// _(aten, _sparse_sparse_matmul) -// _(aten, _sparse_sum) -// _(aten, _sparse_sum_backward) -// _(aten, 
_spdiags) -// _(aten, _stack) -// _(aten, _standard_gamma) -// _(aten, _standard_gamma_grad) -// _(aten, _test_ambiguous_defaults) -// _(aten, _test_autograd_multiple_dispatch) -// _(aten, _test_autograd_multiple_dispatch_view) -// _(aten, _test_autograd_multiple_dispatch_view_copy) -// _(aten, _test_check_tensor) -// _(aten, _test_optional_filled_intlist) -// _(aten, _test_optional_floatlist) -// _(aten, _test_optional_intlist) -// _(aten, _test_serialization_subcmul) -// _(aten, _test_string_default) -// _(aten, _test_warn_in_autograd) -// _(aten, _thnn_differentiable_gru_cell_backward) -// _(aten, _thnn_differentiable_lstm_cell_backward) -// _(aten, _thnn_fused_gru_cell) -// _(aten, _thnn_fused_gru_cell_backward) -// _(aten, _thnn_fused_lstm_cell) -// _(aten, _thnn_fused_lstm_cell_backward) -// _(aten, _thnn_fused_lstm_cell_backward_impl) -// _(aten, _to_copy) -// _(aten, _to_cpu) -// _(aten, _to_dense) -// _(aten, _transform_bias_rescale_qkv) -// _(aten, _transformer_decoder_only_layer_fwd) -// _(aten, _transformer_encoder_layer_fwd) -// _(aten, _trilinear) -// _(aten, _triton_multi_head_attention) -// _(aten, _triton_scaled_dot_attention) -// _(aten, _unique) -// _(aten, _unique2) -// _(aten, _unpack_dual) -// _(aten, _unsafe_view) -// _(aten, _upsample_bicubic2d_aa) -// _(aten, _upsample_bicubic2d_aa_backward) -// _(aten, _upsample_bilinear2d_aa) -// _(aten, _upsample_bilinear2d_aa_backward) -// _(aten, _upsample_nearest_exact1d) -// _(aten, _upsample_nearest_exact1d_backward) -// _(aten, _upsample_nearest_exact2d) -// _(aten, _upsample_nearest_exact2d_backward) -// _(aten, _upsample_nearest_exact3d) -// _(aten, _upsample_nearest_exact3d_backward) -// _(aten, _use_cudnn_ctc_loss) -// _(aten, _use_cudnn_rnn_flatten_weight) -// _(aten, _validate_compressed_sparse_indices) -// _(aten, _validate_sparse_bsc_tensor_args) -// _(aten, _validate_sparse_bsr_tensor_args) -// _(aten, _validate_sparse_compressed_tensor_args) -// _(aten, _validate_sparse_coo_tensor_args) -// _(aten, _validate_sparse_csc_tensor_args) -// _(aten, _validate_sparse_csr_tensor_args) -// _(aten, _values) -// _(aten, _values_copy) -// _(aten, _version) -// _(aten, _weight_norm) -// _(aten, _weight_norm_differentiable_backward) -// _(aten, _weight_norm_interface) -// _(aten, _weight_norm_interface_backward) -// _(aten, abs) -// _(aten, abs_) -// _(aten, absolute) -// _(aten, absolute_) -// _(aten, acos) -// _(aten, acos_) -// _(aten, acosh) -// _(aten, acosh_) -// _(aten, adaptive_avg_pool1d) -// _(aten, adaptive_avg_pool2d) -// _(aten, adaptive_avg_pool3d) -// _(aten, adaptive_avg_pool3d_backward) -// _(aten, adaptive_max_pool1d) -// _(aten, adaptive_max_pool2d) -// _(aten, adaptive_max_pool2d_backward) -// _(aten, adaptive_max_pool3d) -// _(aten, adaptive_max_pool3d_backward) -// _(aten, add) -// _(aten, add_) -// _(aten, addbmm) -// _(aten, addbmm_) -// _(aten, addcdiv) -// _(aten, addcdiv_) -// _(aten, addcmul) -// _(aten, addcmul_) -// _(aten, addmm) -// _(aten, addmm_) -// _(aten, addmv) -// _(aten, addmv_) -// _(aten, addr) -// _(aten, addr_) -// _(aten, adjoint) -// _(aten, affine_grid_generator) -// _(aten, affine_grid_generator_backward) -// _(aten, alias) -// _(aten, alias_copy) -// _(aten, align_as) -// _(aten, align_tensors) -// _(aten, align_to) -// _(aten, all) -// _(aten, allclose) -// _(aten, alpha_dropout) -// _(aten, alpha_dropout_) -// _(aten, amax) -// _(aten, amin) -// _(aten, aminmax) -// _(aten, angle) -// _(aten, any) -// _(aten, arange) -// _(aten, arccos) -// _(aten, arccos_) -// _(aten, 
arccosh) -// _(aten, arccosh_) -// _(aten, arcsin) -// _(aten, arcsin_) -// _(aten, arcsinh) -// _(aten, arcsinh_) -// _(aten, arctan) -// _(aten, arctan2) -// _(aten, arctan2_) -// _(aten, arctan_) -// _(aten, arctanh) -// _(aten, arctanh_) -// _(aten, argmax) -// _(aten, argmin) -// _(aten, argsort) -// _(aten, argwhere) -// _(aten, as_strided) -// _(aten, as_strided_) -// _(aten, as_strided_copy) -// _(aten, as_strided_scatter) -// _(aten, asin) -// _(aten, asin_) -// _(aten, asinh) -// _(aten, asinh_) -// _(aten, atan) -// _(aten, atan2) -// _(aten, atan2_) -// _(aten, atan_) -// _(aten, atanh) -// _(aten, atanh_) -// _(aten, atleast_1d) -// _(aten, atleast_2d) -// _(aten, atleast_3d) -// _(aten, avg_pool1d) -// _(aten, avg_pool2d) -// _(aten, avg_pool2d_backward) -// _(aten, avg_pool3d) -// _(aten, avg_pool3d_backward) -// _(aten, baddbmm) -// _(aten, baddbmm_) -// _(aten, bartlett_window) -// _(aten, batch_norm) -// _(aten, batch_norm_backward_elemt) -// _(aten, batch_norm_backward_reduce) -// _(aten, batch_norm_elemt) -// _(aten, batch_norm_gather_stats) -// _(aten, batch_norm_gather_stats_with_counts) -// _(aten, batch_norm_stats) -// _(aten, batch_norm_update_stats) -// _(aten, bernoulli) -// _(aten, bernoulli_) -// _(aten, bilinear) -// _(aten, binary_cross_entropy) -// _(aten, binary_cross_entropy_backward) -// _(aten, binary_cross_entropy_with_logits) -// _(aten, bincount) -// _(aten, binomial) -// _(aten, bitwise_and) -// _(aten, bitwise_and_) -// _(aten, bitwise_left_shift) -// _(aten, bitwise_left_shift_) -// _(aten, bitwise_not) -// _(aten, bitwise_not_) -// _(aten, bitwise_or) -// _(aten, bitwise_or_) -// _(aten, bitwise_right_shift) -// _(aten, bitwise_right_shift_) -// _(aten, bitwise_xor) -// _(aten, bitwise_xor_) -// _(aten, blackman_window) -// _(aten, block_diag) -// _(aten, bmm) -// _(aten, broadcast_tensors) -// _(aten, broadcast_to) -// _(aten, bucketize) -// _(aten, can_cast) -// _(aten, cartesian_prod) -// _(aten, cat) -// _(aten, cauchy) -// _(aten, cauchy_) -// _(aten, ccol_indices) -// _(aten, ccol_indices_copy) -// _(aten, cdist) -// _(aten, ceil) -// _(aten, ceil_) -// _(aten, celu) -// _(aten, celu_) -// _(aten, chain_matmul) -// _(aten, chalf) -// _(aten, channel_shuffle) -// _(aten, cholesky) -// _(aten, cholesky_inverse) -// _(aten, cholesky_solve) -// _(aten, choose_qparams_optimized) -// _(aten, chunk) -// _(aten, clamp) -// _(aten, clamp_) -// _(aten, clamp_max) -// _(aten, clamp_max_) -// _(aten, clamp_min) -// _(aten, clamp_min_) -// _(aten, clip) -// _(aten, clip_) -// _(aten, clone) -// _(aten, coalesce) -// _(aten, col2im) -// _(aten, col_indices) -// _(aten, col_indices_copy) -// _(aten, column_stack) -// _(aten, combinations) -// _(aten, complex) -// _(aten, concat) -// _(aten, concatenate) -// _(aten, conj) -// _(aten, conj_physical) -// _(aten, conj_physical_) -// _(aten, constant_pad_nd) -// _(aten, contiguous) -// _(aten, conv1d) -// _(aten, conv2d) -// _(aten, conv3d) -// _(aten, conv_depthwise3d) -// _(aten, conv_tbc) -// _(aten, conv_tbc_backward) -// _(aten, conv_transpose1d) -// _(aten, conv_transpose2d) -// _(aten, conv_transpose3d) -// _(aten, convolution) -// _(aten, convolution_backward) -// _(aten, convolution_backward_overrideable) -// _(aten, convolution_overrideable) -// _(aten, copy) -// _(aten, copy_) -// _(aten, copy_sparse_to_sparse) -// _(aten, copy_sparse_to_sparse_) -// _(aten, copysign) -// _(aten, copysign_) -// _(aten, corrcoef) -// _(aten, cos) -// _(aten, cos_) -// _(aten, cosh) -// _(aten, cosh_) -// _(aten, 
cosine_embedding_loss) -// _(aten, cosine_similarity) -// _(aten, count_nonzero) -// _(aten, cov) -// _(aten, cross) -// _(aten, cross_entropy_loss) -// _(aten, crow_indices) -// _(aten, crow_indices_copy) -// _(aten, ctc_loss) -// _(aten, cudnn_affine_grid_generator) -// _(aten, cudnn_affine_grid_generator_backward) -// _(aten, cudnn_batch_norm) -// _(aten, cudnn_batch_norm_backward) -// _(aten, cudnn_convolution) -// _(aten, cudnn_convolution_add_relu) -// _(aten, cudnn_convolution_relu) -// _(aten, cudnn_convolution_transpose) -// _(aten, cudnn_grid_sampler) -// _(aten, cudnn_grid_sampler_backward) -// _(aten, cudnn_is_acceptable) -// _(aten, cummax) -// _(aten, cummaxmin_backward) -// _(aten, cummin) -// _(aten, cumprod) -// _(aten, cumprod_) -// _(aten, cumprod_backward) -// _(aten, cumsum) -// _(aten, cumsum_) -// _(aten, cumulative_trapezoid) -// _(aten, data) -// _(aten, deg2rad) -// _(aten, deg2rad_) -// _(aten, dense_dim) -// _(aten, dequantize) -// _(aten, det) -// _(aten, detach) -// _(aten, detach_) -// _(aten, detach_copy) -// _(aten, diag) -// _(aten, diag_embed) -// _(aten, diagflat) -// _(aten, diagonal) -// _(aten, diagonal_backward) -// _(aten, diagonal_copy) -// _(aten, diagonal_scatter) -// _(aten, diff) -// _(aten, digamma) -// _(aten, digamma_) -// _(aten, dist) -// _(aten, div) -// _(aten, div_) -// _(aten, divide) -// _(aten, divide_) -// _(aten, dot) -// _(aten, dropout) -// _(aten, dropout_) -// _(aten, dsplit) -// _(aten, dstack) -// _(aten, einsum) -// _(aten, elu) -// _(aten, elu_) -// _(aten, elu_backward) -// _(aten, embedding) -// _(aten, embedding_backward) -// _(aten, embedding_bag) -// _(aten, embedding_dense_backward) -// _(aten, embedding_renorm) -// _(aten, embedding_renorm_) -// _(aten, embedding_sparse_backward) -// _(aten, empty) -// _(aten, empty_like) -// _(aten, empty_quantized) -// _(aten, empty_strided) -// _(aten, eq) -// _(aten, eq_) -// _(aten, equal) -// _(aten, erf) -// _(aten, erf_) -// _(aten, erfc) -// _(aten, erfc_) -// _(aten, erfinv) -// _(aten, erfinv_) -// _(aten, exp) -// _(aten, exp2) -// _(aten, exp2_) -// _(aten, exp_) -// _(aten, expand) -// _(aten, expand_as) -// _(aten, expand_copy) -// _(aten, expm1) -// _(aten, expm1_) -// _(aten, exponential) -// _(aten, exponential_) -// _(aten, eye) -// _(aten, fake_quantize_per_channel_affine) -// _(aten, fake_quantize_per_channel_affine_cachemask) -// _(aten, fake_quantize_per_channel_affine_cachemask_backward) -// _(aten, fake_quantize_per_tensor_affine) -// _(aten, fake_quantize_per_tensor_affine_cachemask) -// _(aten, fake_quantize_per_tensor_affine_cachemask_backward) -// _(aten, fbgemm_linear_fp16_weight) -// _(aten, fbgemm_linear_fp16_weight_fp32_activation) -// _(aten, fbgemm_linear_int8_weight) -// _(aten, fbgemm_linear_int8_weight_fp32_activation) -// _(aten, fbgemm_linear_quantize_weight) -// _(aten, fbgemm_pack_gemm_matrix_fp16) -// _(aten, fbgemm_pack_quantized_matrix) -// _(aten, feature_alpha_dropout) -// _(aten, feature_alpha_dropout_) -// _(aten, feature_dropout) -// _(aten, feature_dropout_) -// _(aten, fft_fft) -// _(aten, fft_fft2) -// _(aten, fft_fftfreq) -// _(aten, fft_fftn) -// _(aten, fft_fftshift) -// _(aten, fft_hfft) -// _(aten, fft_hfft2) -// _(aten, fft_hfftn) -// _(aten, fft_ifft) -// _(aten, fft_ifft2) -// _(aten, fft_ifftn) -// _(aten, fft_ifftshift) -// _(aten, fft_ihfft) -// _(aten, fft_ihfft2) -// _(aten, fft_ihfftn) -// _(aten, fft_irfft) -// _(aten, fft_irfft2) -// _(aten, fft_irfftn) -// _(aten, fft_rfft) -// _(aten, fft_rfft2) -// _(aten, 
fft_rfftfreq) -// _(aten, fft_rfftn) -// _(aten, fill) -// _(aten, fill_) -// _(aten, fill_diagonal) -// _(aten, fill_diagonal_) -// _(aten, fix) -// _(aten, fix_) -// _(aten, flatten) -// _(aten, flatten_dense_tensors) -// _(aten, flip) -// _(aten, fliplr) -// _(aten, flipud) -// _(aten, float_power) -// _(aten, float_power_) -// _(aten, floor) -// _(aten, floor_) -// _(aten, floor_divide) -// _(aten, floor_divide_) -// _(aten, fmax) -// _(aten, fmin) -// _(aten, fmod) -// _(aten, fmod_) -// _(aten, frac) -// _(aten, frac_) -// _(aten, fractional_max_pool2d) -// _(aten, fractional_max_pool2d_backward) -// _(aten, fractional_max_pool3d) -// _(aten, fractional_max_pool3d_backward) -// _(aten, frexp) -// _(aten, frobenius_norm) -// _(aten, from_file) -// _(aten, full) -// _(aten, full_like) -// _(aten, fused_moving_avg_obs_fake_quant) -// _(aten, gather) -// _(aten, gather_backward) -// _(aten, gcd) -// _(aten, gcd_) -// _(aten, ge) -// _(aten, ge_) -// _(aten, gelu) -// _(aten, gelu_) -// _(aten, gelu_backward) -// _(aten, geometric) -// _(aten, geometric_) -// _(aten, geqrf) -// _(aten, ger) -// _(aten, glu) -// _(aten, glu_backward) -// _(aten, glu_backward_jvp) -// _(aten, glu_jvp) -// _(aten, gradient) -// _(aten, greater) -// _(aten, greater_) -// _(aten, greater_equal) -// _(aten, greater_equal_) -// _(aten, grid_sampler) -// _(aten, grid_sampler_2d) -// _(aten, grid_sampler_2d_backward) -// _(aten, grid_sampler_3d) -// _(aten, grid_sampler_3d_backward) -// _(aten, group_norm) -// _(aten, gru) -// _(aten, gru_cell) -// _(aten, gt) -// _(aten, gt_) -// _(aten, hamming_window) -// _(aten, hann_window) -// _(aten, hardshrink) -// _(aten, hardshrink_backward) -// _(aten, hardsigmoid) -// _(aten, hardsigmoid_) -// _(aten, hardsigmoid_backward) -// _(aten, hardswish) -// _(aten, hardswish_) -// _(aten, hardswish_backward) -// _(aten, hardtanh) -// _(aten, hardtanh_) -// _(aten, hardtanh_backward) -// _(aten, heaviside) -// _(aten, heaviside_) -// _(aten, hinge_embedding_loss) -// _(aten, histc) -// _(aten, histogram) -// _(aten, histogramdd) -// _(aten, hsplit) -// _(aten, hspmm) -// _(aten, hstack) -// _(aten, huber_loss) -// _(aten, huber_loss_backward) -// _(aten, hypot) -// _(aten, hypot_) -// _(aten, i0) -// _(aten, i0_) -// _(aten, igamma) -// _(aten, igamma_) -// _(aten, igammac) -// _(aten, igammac_) -// _(aten, im2col) -// _(aten, imag) -// _(aten, index) -// _(aten, index_add) -// _(aten, index_add_) -// _(aten, index_copy) -// _(aten, index_copy_) -// _(aten, index_fill) -// _(aten, index_fill_) -// _(aten, index_put) -// _(aten, index_put_) -// _(aten, index_reduce) -// _(aten, index_reduce_) -// _(aten, index_select) -// _(aten, index_select_backward) -// _(aten, indices) -// _(aten, indices_copy) -// _(aten, infinitely_differentiable_gelu_backward) -// _(aten, inner) -// _(aten, instance_norm) -// _(aten, int_repr) -// _(aten, inverse) -// _(aten, is_coalesced) -// _(aten, is_complex) -// _(aten, is_conj) -// _(aten, is_distributed) -// _(aten, is_floating_point) -// _(aten, is_inference) -// _(aten, is_leaf) -// _(aten, is_neg) -// _(aten, is_nonzero) -// _(aten, is_pinned) -// _(aten, is_same_size) -// _(aten, is_set_to) -// _(aten, is_signed) -// _(aten, is_vulkan_available) -// _(aten, isclose) -// _(aten, isfinite) -// _(aten, isin) -// _(aten, isinf) -// _(aten, isnan) -// _(aten, isneginf) -// _(aten, isposinf) -// _(aten, isreal) -// _(aten, istft) -// _(aten, item) -// _(aten, kaiser_window) -// _(aten, kl_div) -// _(aten, kron) -// _(aten, kthvalue) -// _(aten, 
l1_loss) -// _(aten, layer_norm) -// _(aten, lcm) -// _(aten, lcm_) -// _(aten, ldexp) -// _(aten, ldexp_) -// _(aten, le) -// _(aten, le_) -// _(aten, leaky_relu) -// _(aten, leaky_relu_) -// _(aten, leaky_relu_backward) -// _(aten, lerp) -// _(aten, lerp_) -// _(aten, less) -// _(aten, less_) -// _(aten, less_equal) -// _(aten, less_equal_) -// _(aten, lgamma) -// _(aten, lgamma_) -// _(aten, lift) -// _(aten, lift_fresh) -// _(aten, lift_fresh_copy) -// _(aten, linalg_cholesky) -// _(aten, linalg_cholesky_ex) -// _(aten, linalg_cond) -// _(aten, linalg_cross) -// _(aten, linalg_det) -// _(aten, linalg_diagonal) -// _(aten, linalg_eig) -// _(aten, linalg_eigh) -// _(aten, linalg_eigvals) -// _(aten, linalg_eigvalsh) -// _(aten, linalg_householder_product) -// _(aten, linalg_inv) -// _(aten, linalg_inv_ex) -// _(aten, linalg_ldl_factor) -// _(aten, linalg_ldl_factor_ex) -// _(aten, linalg_ldl_solve) -// _(aten, linalg_lstsq) -// _(aten, linalg_lu) -// _(aten, linalg_lu_factor) -// _(aten, linalg_lu_factor_ex) -// _(aten, linalg_lu_solve) -// _(aten, linalg_matmul) -// _(aten, linalg_matrix_exp) -// _(aten, linalg_matrix_norm) -// _(aten, linalg_matrix_power) -// _(aten, linalg_matrix_rank) -// _(aten, linalg_multi_dot) -// _(aten, linalg_norm) -// _(aten, linalg_pinv) -// _(aten, linalg_qr) -// _(aten, linalg_slogdet) -// _(aten, linalg_solve) -// _(aten, linalg_solve_ex) -// _(aten, linalg_solve_triangular) -// _(aten, linalg_svd) -// _(aten, linalg_svdvals) -// _(aten, linalg_tensorinv) -// _(aten, linalg_tensorsolve) -// _(aten, linalg_vander) -// _(aten, linalg_vecdot) -// _(aten, linalg_vector_norm) -// _(aten, linear) -// _(aten, linear_backward) -// _(aten, linspace) -// _(aten, log) -// _(aten, log10) -// _(aten, log10_) -// _(aten, log1p) -// _(aten, log1p_) -// _(aten, log2) -// _(aten, log2_) -// _(aten, log_) -// _(aten, log_normal) -// _(aten, log_normal_) -// _(aten, log_sigmoid) -// _(aten, log_sigmoid_backward) -// _(aten, log_sigmoid_forward) -// _(aten, log_softmax) -// _(aten, logaddexp) -// _(aten, logaddexp2) -// _(aten, logcumsumexp) -// _(aten, logdet) -// _(aten, logical_and) -// _(aten, logical_and_) -// _(aten, logical_not) -// _(aten, logical_not_) -// _(aten, logical_or) -// _(aten, logical_or_) -// _(aten, logical_xor) -// _(aten, logical_xor_) -// _(aten, logit) -// _(aten, logit_) -// _(aten, logit_backward) -// _(aten, logspace) -// _(aten, logsumexp) -// _(aten, lshift) -// _(aten, lstm) -// _(aten, lstm_cell) -// _(aten, lstm_mps_backward) -// _(aten, lt) -// _(aten, lt_) -// _(aten, lu_solve) -// _(aten, lu_unpack) -// _(aten, mH) -// _(aten, mT) -// _(aten, margin_ranking_loss) -// _(aten, masked_fill) -// _(aten, masked_fill_) -// _(aten, masked_scatter) -// _(aten, masked_scatter_) -// _(aten, masked_select) -// _(aten, masked_select_backward) -// _(aten, matmul) -// _(aten, matmul_backward) -// _(aten, matrix_H) -// _(aten, matrix_exp) -// _(aten, matrix_exp_backward) -// _(aten, matrix_power) -// _(aten, max) -// _(aten, max_pool1d) -// _(aten, max_pool1d_with_indices) -// _(aten, max_pool2d) -// _(aten, max_pool2d_backward) -// _(aten, max_pool2d_with_indices) -// _(aten, max_pool2d_with_indices_backward) -// _(aten, max_pool3d) -// _(aten, max_pool3d_with_indices) -// _(aten, max_pool3d_with_indices_backward) -// _(aten, max_unpool2d) -// _(aten, max_unpool3d) -// _(aten, maximum) -// _(aten, mean) -// _(aten, median) -// _(aten, meshgrid) -// _(aten, min) -// _(aten, minimum) -// _(aten, miopen_batch_norm) -// _(aten, miopen_batch_norm_backward) 
-// _(aten, miopen_convolution) -// _(aten, miopen_convolution_add_relu) -// _(aten, miopen_convolution_relu) -// _(aten, miopen_convolution_transpose) -// _(aten, miopen_depthwise_convolution) -// _(aten, miopen_rnn) -// _(aten, miopen_rnn_backward) -// _(aten, mish) -// _(aten, mish_) -// _(aten, mish_backward) -// _(aten, mkldnn_adaptive_avg_pool2d) -// _(aten, mkldnn_adaptive_avg_pool2d_backward) -// _(aten, mkldnn_convolution) -// _(aten, mkldnn_linear) -// _(aten, mkldnn_linear_backward) -// _(aten, mkldnn_linear_backward_input) -// _(aten, mkldnn_linear_backward_weights) -// _(aten, mkldnn_max_pool2d) -// _(aten, mkldnn_max_pool2d_backward) -// _(aten, mkldnn_max_pool3d) -// _(aten, mkldnn_max_pool3d_backward) -// _(aten, mkldnn_reorder_conv2d_weight) -// _(aten, mkldnn_reorder_conv3d_weight) -// _(aten, mkldnn_rnn_layer) -// _(aten, mkldnn_rnn_layer_backward) -// _(aten, mm) -// _(aten, mode) -// _(aten, moveaxis) -// _(aten, movedim) -// _(aten, mps_convolution_backward) -// _(aten, mps_convolution_transpose_backward) -// _(aten, mse_loss) -// _(aten, mse_loss_backward) -// _(aten, msort) -// _(aten, mul) -// _(aten, mul_) -// _(aten, multi_margin_loss) -// _(aten, multi_margin_loss_backward) -// _(aten, multilabel_margin_loss) -// _(aten, multilabel_margin_loss_backward) -// _(aten, multilabel_margin_loss_forward) -// _(aten, multinomial) -// _(aten, multiply) -// _(aten, multiply_) -// _(aten, mv) -// _(aten, mvlgamma) -// _(aten, mvlgamma_) -// _(aten, nan_to_num) -// _(aten, nan_to_num_) -// _(aten, nanmean) -// _(aten, nanmedian) -// _(aten, nanquantile) -// _(aten, nansum) -// _(aten, narrow) -// _(aten, narrow_copy) -// _(aten, native_batch_norm) -// _(aten, native_batch_norm_backward) -// _(aten, native_channel_shuffle) -// _(aten, native_dropout) -// _(aten, native_dropout_backward) -// _(aten, native_group_norm) -// _(aten, native_group_norm_backward) -// _(aten, native_layer_norm) -// _(aten, native_layer_norm_backward) -// _(aten, native_norm) -// _(aten, ne) -// _(aten, ne_) -// _(aten, neg) -// _(aten, neg_) -// _(aten, negative) -// _(aten, negative_) -// _(aten, nested_to_padded_tensor) -// _(aten, new_empty) -// _(aten, new_empty_strided) -// _(aten, new_full) -// _(aten, new_ones) -// _(aten, new_zeros) -// _(aten, nextafter) -// _(aten, nextafter_) -// _(aten, nll_loss) -// _(aten, nll_loss2d) -// _(aten, nll_loss2d_backward) -// _(aten, nll_loss2d_forward) -// _(aten, nll_loss_backward) -// _(aten, nll_loss_forward) -// _(aten, nll_loss_nd) -// _(aten, nonzero) -// _(aten, nonzero_numpy) -// _(aten, norm) -// _(aten, norm_except_dim) -// _(aten, normal) -// _(aten, normal_) -// _(aten, normal_functional) -// _(aten, not_equal) -// _(aten, not_equal_) -// _(aten, nuclear_norm) -// _(aten, numpy_T) -// _(aten, one_hot) -// _(aten, ones) -// _(aten, ones_like) -// _(aten, orgqr) -// _(aten, ormqr) -// _(aten, outer) -// _(aten, output_nr) -// _(aten, pad) -// _(aten, pad_sequence) -// _(aten, pairwise_distance) -// _(aten, pdist) -// _(aten, permute) -// _(aten, permute_copy) -// _(aten, pin_memory) -// _(aten, pinverse) -// _(aten, pixel_shuffle) -// _(aten, pixel_unshuffle) -// _(aten, poisson) -// _(aten, poisson_nll_loss) -// _(aten, polar) -// _(aten, polygamma) -// _(aten, polygamma_) -// _(aten, positive) -// _(aten, pow) -// _(aten, pow_) -// _(aten, prelu) -// _(aten, prod) -// _(aten, promote_types) -// _(aten, put) -// _(aten, put_) -// _(aten, q_per_channel_axis) -// _(aten, q_per_channel_scales) -// _(aten, q_per_channel_zero_points) -// _(aten, 
q_scale) -// _(aten, q_zero_point) -// _(aten, qr) -// _(aten, qscheme) -// _(aten, quantile) -// _(aten, quantize_per_channel) -// _(aten, quantize_per_tensor) -// _(aten, quantize_per_tensor_dynamic) -// _(aten, quantized_batch_norm) -// _(aten, quantized_gru_cell) -// _(aten, quantized_lstm_cell) -// _(aten, quantized_max_pool1d) -// _(aten, quantized_max_pool2d) -// _(aten, quantized_rnn_relu_cell) -// _(aten, quantized_rnn_tanh_cell) -// _(aten, rad2deg) -// _(aten, rad2deg_) -// _(aten, rand) -// _(aten, rand_like) -// _(aten, randint) -// _(aten, randint_like) -// _(aten, randn) -// _(aten, randn_like) -// _(aten, random) -// _(aten, random_) -// _(aten, randperm) -// _(aten, range) -// _(aten, ravel) -// _(aten, real) -// _(aten, reciprocal) -// _(aten, reciprocal_) -// _(aten, record_stream) -// _(aten, refine_names) -// _(aten, reflection_pad1d) -// _(aten, reflection_pad1d_backward) -// _(aten, reflection_pad2d) -// _(aten, reflection_pad2d_backward) -// _(aten, reflection_pad3d) -// _(aten, reflection_pad3d_backward) -// _(aten, relu) -// _(aten, relu6) -// _(aten, relu6_) -// _(aten, relu_) -// _(aten, remainder) -// _(aten, remainder_) -// _(aten, rename) -// _(aten, rename_) -// _(aten, renorm) -// _(aten, renorm_) -// _(aten, repeat) -// _(aten, repeat_interleave) -// _(aten, replication_pad1d) -// _(aten, replication_pad1d_backward) -// _(aten, replication_pad2d) -// _(aten, replication_pad2d_backward) -// _(aten, replication_pad3d) -// _(aten, replication_pad3d_backward) -// _(aten, requires_grad) -// _(aten, requires_grad_) -// _(aten, reshape) -// _(aten, reshape_as) -// _(aten, resize) -// _(aten, resize_) -// _(aten, resize_as) -// _(aten, resize_as_) -// _(aten, resize_as_sparse) -// _(aten, resize_as_sparse_) -// _(aten, resolve_conj) -// _(aten, resolve_neg) -// _(aten, result_type) -// _(aten, retain_grad) -// _(aten, retains_grad) -// _(aten, rnn_relu) -// _(aten, rnn_relu_cell) -// _(aten, rnn_tanh) -// _(aten, rnn_tanh_cell) -// _(aten, roll) -// _(aten, rot90) -// _(aten, round) -// _(aten, round_) -// _(aten, row_indices) -// _(aten, row_indices_copy) -// _(aten, row_stack) -// _(aten, rrelu) -// _(aten, rrelu_) -// _(aten, rrelu_with_noise) -// _(aten, rrelu_with_noise_) -// _(aten, rrelu_with_noise_backward) -// _(aten, rshift) -// _(aten, rsqrt) -// _(aten, rsqrt_) -// _(aten, rsub) -// _(aten, scalar_tensor) -// _(aten, scaled_dot_product_attention) -// _(aten, scatter) -// _(aten, scatter_) -// _(aten, scatter_add) -// _(aten, scatter_add_) -// _(aten, scatter_reduce) -// _(aten, scatter_reduce_) -// _(aten, searchsorted) -// _(aten, segment_reduce) -// _(aten, select) -// _(aten, select_backward) -// _(aten, select_copy) -// _(aten, select_scatter) -// _(aten, selu) -// _(aten, selu_) -// _(aten, set) -// _(aten, set_) -// _(aten, set_data) -// _(aten, sgn) -// _(aten, sgn_) -// _(aten, sigmoid) -// _(aten, sigmoid_) -// _(aten, sigmoid_backward) -// _(aten, sign) -// _(aten, sign_) -// _(aten, signbit) -// _(aten, silu) -// _(aten, silu_) -// _(aten, silu_backward) -// _(aten, sin) -// _(aten, sin_) -// _(aten, sinc) -// _(aten, sinc_) -// _(aten, sinh) -// _(aten, sinh_) -// _(aten, size) -// _(aten, slice) -// _(aten, slice_backward) -// _(aten, slice_copy) -// _(aten, slice_scatter) -// _(aten, slogdet) -// _(aten, slow_conv3d) -// _(aten, slow_conv3d_forward) -// _(aten, slow_conv_dilated2d) -// _(aten, slow_conv_dilated3d) -// _(aten, slow_conv_transpose2d) -// _(aten, slow_conv_transpose3d) -// _(aten, smm) -// _(aten, smooth_l1_loss) -// _(aten, 
smooth_l1_loss_backward) -// _(aten, soft_margin_loss) -// _(aten, soft_margin_loss_backward) -// _(aten, softmax) -// _(aten, softplus) -// _(aten, softplus_backward) -// _(aten, softshrink) -// _(aten, softshrink_backward) -// _(aten, sort) -// _(aten, sparse_bsc_tensor) -// _(aten, sparse_bsr_tensor) -// _(aten, sparse_compressed_tensor) -// _(aten, sparse_coo_tensor) -// _(aten, sparse_csc_tensor) -// _(aten, sparse_csr_tensor) -// _(aten, sparse_dim) -// _(aten, sparse_mask) -// _(aten, sparse_resize) -// _(aten, sparse_resize_) -// _(aten, sparse_resize_and_clear) -// _(aten, sparse_resize_and_clear_) -// _(aten, sparse_sampled_addmm) -// _(aten, special_airy_ai) -// _(aten, special_bessel_j0) -// _(aten, special_bessel_j1) -// _(aten, special_bessel_y0) -// _(aten, special_bessel_y1) -// _(aten, special_chebyshev_polynomial_t) -// _(aten, special_chebyshev_polynomial_u) -// _(aten, special_chebyshev_polynomial_v) -// _(aten, special_chebyshev_polynomial_w) -// _(aten, special_digamma) -// _(aten, special_entr) -// _(aten, special_erf) -// _(aten, special_erfc) -// _(aten, special_erfcx) -// _(aten, special_erfinv) -// _(aten, special_exp2) -// _(aten, special_expit) -// _(aten, special_expm1) -// _(aten, special_gammainc) -// _(aten, special_gammaincc) -// _(aten, special_gammaln) -// _(aten, special_hermite_polynomial_h) -// _(aten, special_hermite_polynomial_he) -// _(aten, special_i0) -// _(aten, special_i0e) -// _(aten, special_i1) -// _(aten, special_i1e) -// _(aten, special_laguerre_polynomial_l) -// _(aten, special_legendre_polynomial_p) -// _(aten, special_log1p) -// _(aten, special_log_ndtr) -// _(aten, special_log_softmax) -// _(aten, special_logit) -// _(aten, special_logsumexp) -// _(aten, special_modified_bessel_i0) -// _(aten, special_modified_bessel_i1) -// _(aten, special_modified_bessel_k0) -// _(aten, special_modified_bessel_k1) -// _(aten, special_multigammaln) -// _(aten, special_ndtr) -// _(aten, special_ndtri) -// _(aten, special_polygamma) -// _(aten, special_psi) -// _(aten, special_round) -// _(aten, special_scaled_modified_bessel_k0) -// _(aten, special_scaled_modified_bessel_k1) -// _(aten, special_shifted_chebyshev_polynomial_t) -// _(aten, special_shifted_chebyshev_polynomial_u) -// _(aten, special_shifted_chebyshev_polynomial_v) -// _(aten, special_shifted_chebyshev_polynomial_w) -// _(aten, special_sinc) -// _(aten, special_softmax) -// _(aten, special_spherical_bessel_j0) -// _(aten, special_xlog1py) -// _(aten, special_xlogy) -// _(aten, special_zeta) -// _(aten, split) -// _(aten, split_copy) -// _(aten, split_with_sizes) -// _(aten, split_with_sizes_copy) -// _(aten, sqrt) -// _(aten, sqrt_) -// _(aten, square) -// _(aten, square_) -// _(aten, squeeze) -// _(aten, squeeze_) -// _(aten, squeeze_copy) -// _(aten, sspaddmm) -// _(aten, stack) -// _(aten, std) -// _(aten, std_mean) -// _(aten, stft) -// _(aten, stride) -// _(aten, sub) -// _(aten, sub_) -// _(aten, subtract) -// _(aten, subtract_) -// _(aten, sum) -// _(aten, sum_to_size) -// _(aten, svd) -// _(aten, swapaxes) -// _(aten, swapaxes_) -// _(aten, swapdims) -// _(aten, swapdims_) -// _(aten, t) -// _(aten, t_) -// _(aten, t_copy) -// _(aten, take) -// _(aten, take_along_dim) -// _(aten, tan) -// _(aten, tan_) -// _(aten, tanh) -// _(aten, tanh_) -// _(aten, tanh_backward) -// _(aten, tensor_split) -// _(aten, tensordot) -// _(aten, thnn_conv2d) -// _(aten, threshold) -// _(aten, threshold_) -// _(aten, threshold_backward) -// _(aten, tile) -// _(aten, to) -// _(aten, to_dense) -// _(aten, 
to_dense_backward) -// _(aten, to_mkldnn) -// _(aten, to_mkldnn_backward) -// _(aten, to_padded_tensor) -// _(aten, to_sparse) -// _(aten, to_sparse_bsc) -// _(aten, to_sparse_bsr) -// _(aten, to_sparse_csc) -// _(aten, to_sparse_csr) -// _(aten, topk) -// _(aten, trace) -// _(aten, trace_backward) -// _(aten, transpose) -// _(aten, transpose_) -// _(aten, transpose_copy) -// _(aten, trapezoid) -// _(aten, trapz) -// _(aten, triangular_solve) -// _(aten, tril) -// _(aten, tril_) -// _(aten, tril_indices) -// _(aten, triplet_margin_loss) -// _(aten, triu) -// _(aten, triu_) -// _(aten, triu_indices) -// _(aten, true_divide) -// _(aten, true_divide_) -// _(aten, trunc) -// _(aten, trunc_) -// _(aten, type_as) -// _(aten, unbind) -// _(aten, unbind_copy) -// _(aten, unflatten) -// _(aten, unflatten_dense_tensors) -// _(aten, unfold) -// _(aten, unfold_backward) -// _(aten, unfold_copy) -// _(aten, uniform) -// _(aten, uniform_) -// _(aten, unique_consecutive) -// _(aten, unique_dim) -// _(aten, unique_dim_consecutive) -// _(aten, unsafe_chunk) -// _(aten, unsafe_split) -// _(aten, unsafe_split_with_sizes) -// _(aten, unsqueeze) -// _(aten, unsqueeze_) -// _(aten, unsqueeze_copy) -// _(aten, upsample_bicubic2d) -// _(aten, upsample_bicubic2d_backward) -// _(aten, upsample_bilinear2d) -// _(aten, upsample_bilinear2d_backward) -// _(aten, upsample_linear1d) -// _(aten, upsample_linear1d_backward) -// _(aten, upsample_nearest1d) -// _(aten, upsample_nearest1d_backward) -// _(aten, upsample_nearest2d) -// _(aten, upsample_nearest2d_backward) -// _(aten, upsample_nearest3d) -// _(aten, upsample_nearest3d_backward) -// _(aten, upsample_trilinear3d) -// _(aten, upsample_trilinear3d_backward) -// _(aten, value_selecting_reduction_backward) -// _(aten, values) -// _(aten, values_copy) -// _(aten, vander) -// _(aten, var) -// _(aten, var_mean) -// _(aten, vdot) -// _(aten, view) -// _(aten, view_as) -// _(aten, view_as_complex) -// _(aten, view_as_complex_copy) -// _(aten, view_as_real) -// _(aten, view_as_real_copy) -// _(aten, view_copy) -// _(aten, vsplit) -// _(aten, vstack) -// _(aten, where) -// _(aten, xlogy) -// _(aten, xlogy_) -// _(aten, zero) -// _(aten, zero_) -// _(aten, zeros) -// _(aten, zeros_like)
-
-// #define FORALL_ATTR_BASE_SYMBOLS(_)
-// _(attr, A) -// _(attr, B) -// _(attr, C) -// _(attr, H) -// _(attr, HxW) -// _(attr, K) -// _(attr, L) -// _(attr, LD) -// _(attr, LU) -// _(attr, LU_data) -// _(attr, LU_pivots) -// _(attr, M) -// _(attr, N) -// _(attr, P) -// _(attr, Q) -// _(attr, R) -// _(attr, S) -// _(attr, U) -// _(attr, UPLO) -// _(attr, V) -// _(attr, Vh) -// _(attr, W) -// _(attr, X) -// _(attr, a) -// _(attr, abs) -// _(attr, accumulate) -// _(attr, addends) -// _(attr, adjoint) -// _(attr, align_corners) -// _(attr, allow_tf32) -// _(attr, alpha) -// _(attr, amsgrad) -// _(attr, anchor) -// _(attr, angle) -// _(attr, api_name) -// _(attr, append) -// _(attr, approximate) -// _(attr, arg1) -// _(attr, arg2) -// _(attr, arg3) -// _(attr, arg_out) -// _(attr, assume_unique) -// _(attr, atol) -// _(attr, attn_mask) -// _(attr, average_attn_weights) -// _(attr, averaging_const) -// _(attr, aweights) -// _(attr, axis) -// _(attr, axis0) -// _(attr, axis1) -// _(attr, b) -// _(attr, b_hh) -// _(attr, b_ih) -// _(attr, bag_size) -// _(attr, base) -// _(attr, batch1) -// _(attr, batch2) -// _(attr, batch_dim) -// _(attr, batch_first) -// _(attr, batch_size) -// _(attr, batch_sizes) -// _(attr, benchmark) -// _(attr, beta) -// _(attr, beta1) -// _(attr, beta2) -// _(attr, bias)
-// _(attr, bias_defined) -// _(attr, bias_g) -// _(attr, bias_sizes) -// _(attr, bidirectional) -// _(attr, bin_edges) -// _(attr, bins) -// _(attr, bit_width) -// _(attr, blank) -// _(attr, blocksize) -// _(attr, boundaries) -// _(attr, buffer) -// _(attr, causal) -// _(attr, ccol_indices) -// _(attr, cdim) -// _(attr, cdist) -// _(attr, ceil_mode) -// _(attr, cell_state_fwd) -// _(attr, center) -// _(attr, ch_axis) -// _(attr, check_errors) -// _(attr, chunk_grad_outputs) -// _(attr, chunks) -// _(attr, coalesced) -// _(attr, coefficients) -// _(attr, col) -// _(attr, col_indices) -// _(attr, col_offsets) -// _(attr, col_offsets_hh) -// _(attr, col_offsets_ih) -// _(attr, compressed_idx) -// _(attr, compressed_indices) -// _(attr, compressed_indices_dtype) -// _(attr, compute_log_sumexp) -// _(attr, compute_mode) -// _(attr, compute_uv) -// _(attr, compute_v) -// _(attr, condition) -// _(attr, copy) -// _(attr, correction) -// _(attr, count) -// _(attr, count_include_pad) -// _(attr, counts) -// _(attr, cpu_dtype) -// _(attr, cpu_enabled) -// _(attr, cpu_nested_shape_example) -// _(attr, create_graph) -// _(attr, crow_indices) -// _(attr, cu_seqlens_k) -// _(attr, cu_seqlens_q) -// _(attr, cuda_dtype) -// _(attr, cuda_enabled) -// _(attr, cudnn_enable) -// _(attr, cudnn_enabled) -// _(attr, cum_seq_k) -// _(attr, cum_seq_q) -// _(attr, cx) -// _(attr, cx_) -// _(attr, cx_tmp) -// _(attr, cy) -// _(attr, cy_) -// _(attr, d) -// _(attr, data) -// _(attr, decimals) -// _(attr, delta) -// _(attr, dense) -// _(attr, dense_dim) -// _(attr, density) -// _(attr, descending) -// _(attr, destination) -// _(attr, deterministic) -// _(attr, device) -// _(attr, device_index) -// _(attr, dgrad_glu) -// _(attr, diagonal) -// _(attr, diagonals) -// _(attr, dilation) -// _(attr, dim) -// _(attr, dim0) -// _(attr, dim1) -// _(attr, dim2) -// _(attr, dimension) -// _(attr, dims) -// _(attr, dims_other) -// _(attr, dims_self) -// _(attr, divisor_override) -// _(attr, downscale_factor) -// _(attr, driver) -// _(attr, dropout) -// _(attr, dropout_mask) -// _(attr, dropout_p) -// _(attr, dropout_seed) -// _(attr, dropout_state) -// _(attr, dst) -// _(attr, dtype) -// _(attr, dual) -// _(attr, dummy) -// _(attr, dx) -// _(attr, edge_order) -// _(attr, eigenvalues) -// _(attr, eigenvectors) -// _(attr, eigvals) -// _(attr, eigvecs) -// _(attr, element) -// _(attr, elements) -// _(attr, ellipsis_idx) -// _(attr, embed_dim) -// _(attr, end) -// _(attr, end_dim) -// _(attr, eps) -// _(attr, epsilon) -// _(attr, equal_nan) -// _(attr, equation) -// _(attr, exp_avg_sqs) -// _(attr, exp_avgs) -// _(attr, expand1) -// _(attr, expand2) -// _(attr, expand3) -// _(attr, exponent) -// _(attr, exponential_average_factor) -// _(attr, fake_quant_enabled) -// _(attr, fake_quant_on) -// _(attr, ffn_bias_1) -// _(attr, ffn_bias_2) -// _(attr, ffn_weight_1) -// _(attr, ffn_weight_2) -// _(attr, filename) -// _(attr, fill_value) -// _(attr, flat) -// _(attr, forward) -// _(attr, found_inf) -// _(attr, from) -// _(attr, full) -// _(attr, full_matrices) -// _(attr, fuse_transform_0213) -// _(attr, fweights) -// _(attr, g) -// _(attr, gO) -// _(attr, generator) -// _(attr, ggI) -// _(attr, ggW) -// _(attr, ggb) -// _(attr, glu) -// _(attr, grad) -// _(attr, grad_bias) -// _(attr, grad_cy) -// _(attr, grad_factor) -// _(attr, grad_glu) -// _(attr, grad_hy) -// _(attr, grad_in) -// _(attr, grad_input) -// _(attr, grad_out) -// _(attr, grad_out_) -// _(attr, grad_output) -// _(attr, grad_scale) -// _(attr, grad_w) -// _(attr, 
grad_weight) -// _(attr, grad_x) -// _(attr, grad_y) -// _(attr, gradient) -// _(attr, grads) -// _(attr, grid) -// _(attr, group) -// _(attr, groups) -// _(attr, growth_interval) -// _(attr, growth_tracker) -// _(attr, half_to_float) -// _(attr, has_bias) -// _(attr, has_biases) -// _(attr, hermitian) -// _(attr, hidden_bias) -// _(attr, hidden_gates) -// _(attr, hidden_size) -// _(attr, high) -// _(attr, hist) -// _(attr, hop_length) -// _(attr, hx) -// _(attr, hx_) -// _(attr, hy_) -// _(attr, i1) -// _(attr, i2) -// _(attr, i3) -// _(attr, ignore_index) -// _(attr, imag) -// _(attr, impl_index) -// _(attr, implicit) -// _(attr, include_last_offset) -// _(attr, include_self) -// _(attr, incr_key) -// _(attr, incr_value) -// _(attr, increasing) -// _(attr, ind) -// _(attr, index) -// _(attr, indexing) -// _(attr, indices) -// _(attr, info) -// _(attr, initial) -// _(attr, input) -// _(attr, input1) -// _(attr, input2) -// _(attr, input3) -// _(attr, input_bias) -// _(attr, input_dtype) -// _(attr, input_g) -// _(attr, input_gates) -// _(attr, input_lengths) -// _(attr, input_scale) -// _(attr, input_size) -// _(attr, input_sizes) -// _(attr, inputs) -// _(attr, interpolation) -// _(attr, interpolation_mode) -// _(attr, inv_scale) -// _(attr, inverse) -// _(attr, invert) -// _(attr, invstd) -// _(attr, is_causal) -// _(attr, is_crow) -// _(attr, is_matrix) -// _(attr, is_result) -// _(attr, is_target) -// _(attr, k) -// _(attr, keepdim) -// _(attr, kernel_size) -// _(attr, key) -// _(attr, label_smoothing) -// _(attr, lambd) -// _(attr, largest) -// _(attr, last_dim_size) -// _(attr, layersOutputs) -// _(attr, layout) -// _(attr, left) -// _(attr, length) -// _(attr, lengths) -// _(attr, level) -// _(attr, like) -// _(attr, list) -// _(attr, log_alpha) -// _(attr, log_input) -// _(attr, log_probs) -// _(attr, log_target) -// _(attr, logabsdet) -// _(attr, logsumexp) -// _(attr, low) -// _(attr, lower) -// _(attr, lr) -// _(attr, ltm) -// _(attr, m) -// _(attr, mantissa) -// _(attr, margin) -// _(attr, mask) -// _(attr, mask_check) -// _(attr, mask_type) -// _(attr, mat) -// _(attr, mat1) -// _(attr, mat2) -// _(attr, matrices) -// _(attr, max) -// _(attr, max_exp_avg_sqs) -// _(attr, max_k) -// _(attr, max_norm) -// _(attr, max_q) -// _(attr, max_seqlen_q) -// _(attr, max_size) -// _(attr, max_val) -// _(attr, max_values) -// _(attr, maximize) -// _(attr, maximum_indices) -// _(attr, maxnorm) -// _(attr, mean) -// _(attr, mean_dy) -// _(attr, mean_dy_xmu) -// _(attr, median) -// _(attr, memory_format) -// _(attr, min) -// _(attr, min_indices) -// _(attr, min_val) -// _(attr, minlength) -// _(attr, mode) -// _(attr, momentum) -// _(attr, n) -// _(attr, n_bins) -// _(attr, n_fft) -// _(attr, names) -// _(attr, nan) -// _(attr, need_attn_weights) -// _(attr, need_weights) -// _(attr, neg_log_likelihood) -// _(attr, negative) -// _(attr, negative_slope) -// _(attr, neginf) -// _(attr, nested_size) -// _(attr, nested_strides) -// _(attr, new_data) -// _(attr, nnz) -// _(attr, noise) -// _(attr, non_blocking) -// _(attr, norm) -// _(attr, norm_bias_1) -// _(attr, norm_bias_2) -// _(attr, norm_first) -// _(attr, norm_type) -// _(attr, norm_weight_1) -// _(attr, norm_weight_2) -// _(attr, normalization) -// _(attr, normalized) -// _(attr, normalized_shape) -// _(attr, nt_example) -// _(attr, num_classes) -// _(attr, num_generated) -// _(attr, num_groups) -// _(attr, num_head) -// _(attr, num_heads) -// _(attr, num_layers) -// _(attr, num_samples) -// _(attr, num_weights) -// _(attr, numel) -// 
_(attr, observer_on) -// _(attr, offset) -// _(attr, offset2bag) -// _(attr, offsets) -// _(attr, onesided) -// _(attr, ord) -// _(attr, order) -// _(attr, other) -// _(attr, out) -// _(attr, out0) -// _(attr, out1) -// _(attr, out2) -// _(attr, out3) -// _(attr, out4) -// _(attr, out5) -// _(attr, out6) -// _(attr, out_dim) -// _(attr, out_int32) -// _(attr, outdim) -// _(attr, output) -// _(attr, output_mask) -// _(attr, output_padding) -// _(attr, output_scale) -// _(attr, output_size) -// _(attr, output_zero_point) -// _(attr, p) -// _(attr, packed) -// _(attr, packed_hh) -// _(attr, packed_ih) -// _(attr, packed_weight) -// _(attr, pad) -// _(attr, pad_mode) -// _(attr, padded) -// _(attr, padding) -// _(attr, padding_idx) -// _(attr, padding_mode) -// _(attr, padding_value) -// _(attr, params) -// _(attr, path) -// _(attr, pdist) -// _(attr, per_row_fake_quant) -// _(attr, per_sample_weights) -// _(attr, periodic) -// _(attr, philox_offset) -// _(attr, philox_seed) -// _(attr, pin_memory) -// _(attr, pivot) -// _(attr, pivots) -// _(attr, plain_idx) -// _(attr, plain_indices) -// _(attr, pos_weight) -// _(attr, posinf) -// _(attr, positive) -// _(attr, pow) -// _(attr, prepend) -// _(attr, primal) -// _(attr, prob) -// _(attr, proj_bias) -// _(attr, proj_size) -// _(attr, proj_weight) -// _(attr, q) -// _(attr, qkv) -// _(attr, qkv_bias) -// _(attr, qkv_weight) -// _(attr, qtensor) -// _(attr, quant_max) -// _(attr, quant_min) -// _(attr, quasi) -// _(attr, query) -// _(attr, r) -// _(attr, random_samples) -// _(attr, range) -// _(attr, rank) -// _(attr, ratio) -// _(attr, rcond) -// _(attr, real) -// _(attr, reduce) -// _(attr, reduce_range) -// _(attr, reduction) -// _(attr, repeats) -// _(attr, replacement) -// _(attr, requires_grad) -// _(attr, reserve) -// _(attr, reserveSpace) -// _(attr, reservedSpace) -// _(attr, residuals) -// _(attr, result) -// _(attr, retain_graph) -// _(attr, return_complex) -// _(attr, return_counts) -// _(attr, return_debug_mask) -// _(attr, return_inverse) -// _(attr, reverse) -// _(attr, right) -// _(attr, rounding_mode) -// _(attr, row) -// _(attr, row_indices) -// _(attr, rstd) -// _(attr, rtol) -// _(attr, running_max) -// _(attr, running_mean) -// _(attr, running_min) -// _(attr, running_var) -// _(attr, s) -// _(attr, save_invstd) -// _(attr, save_mean) -// _(attr, save_var) -// _(attr, save_var_transform) -// _(attr, saved_g) -// _(attr, saved_norms) -// _(attr, saved_v) -// _(attr, scalar) -// _(attr, scalar1) -// _(attr, scalar2) -// _(attr, scalars) -// _(attr, scale) -// _(attr, scale_backoff_factor) -// _(attr, scale_factors) -// _(attr, scale_grad_by_freq) -// _(attr, scale_growth_factor) -// _(attr, scale_hh) -// _(attr, scale_ih) -// _(attr, scales) -// _(attr, scales_d) -// _(attr, scales_h) -// _(attr, scales_w) -// _(attr, sections) -// _(attr, self) -// _(attr, self_is_result) -// _(attr, self_num_batch_dims) -// _(attr, self_or_result) -// _(attr, self_sizes) -// _(attr, sequences) -// _(attr, shape) -// _(attr, shared) -// _(attr, shifts) -// _(attr, side) -// _(attr, sigma) -// _(attr, sign) -// _(attr, singular_values) -// _(attr, size) -// _(attr, sizes) -// _(attr, sobolstate) -// _(attr, solution) -// _(attr, some) -// _(attr, sorted) -// _(attr, sorted_sequence) -// _(attr, sorter) -// _(attr, source) -// _(attr, spacing) -// _(attr, sparse) -// _(attr, sparse_dim) -// _(attr, sparse_grad) -// _(attr, split_size) -// _(attr, split_sizes) -// _(attr, src) -// _(attr, stable) -// _(attr, start) -// _(attr, start_dim) -// 
_(attr, state_steps) -// _(attr, std) -// _(attr, step) -// _(attr, steps) -// _(attr, storage_offset) -// _(attr, stride) -// _(attr, sumdim) -// _(attr, swap) -// _(attr, symmetric_quant) -// _(attr, t) -// _(attr, tangent) -// _(attr, target) -// _(attr, target_lengths) -// _(attr, targets) -// _(attr, tau) -// _(attr, tensor) -// _(attr, tensor1) -// _(attr, tensor2) -// _(attr, tensor_indices_or_sections) -// _(attr, tensors) -// _(attr, tensors1) -// _(attr, test_element) -// _(attr, test_elements) -// _(attr, the_template) -// _(attr, theta) -// _(attr, threshold) -// _(attr, to) -// _(attr, tol) -// _(attr, total) -// _(attr, total_length) -// _(attr, total_weight) -// _(attr, train) -// _(attr, training) -// _(attr, transpose) -// _(attr, transposed) -// _(attr, type1) -// _(attr, type2) -// _(attr, unbiased) -// _(attr, unitriangular) -// _(attr, unpack_data) -// _(attr, unpack_pivots) -// _(attr, unroll_dim) -// _(attr, unsafe) -// _(attr, upper) -// _(attr, upscale_factor) -// _(attr, use_gelu) -// _(attr, use_input_stats) -// _(attr, v) -// _(attr, value) -// _(attr, values) -// _(attr, var) -// _(attr, vec) -// _(attr, vec1) -// _(attr, vec2) -// _(attr, w_hh) -// _(attr, w_ih) -// _(attr, weight) -// _(attr, weight0) -// _(attr, weight1) -// _(attr, weight2) -// _(attr, weight3) -// _(attr, weight4) -// _(attr, weight_arr) -// _(attr, weight_buf) -// _(attr, weight_decay) -// _(attr, weight_g) -// _(attr, weight_scale) -// _(attr, weight_stride0) -// _(attr, weight_zero_point) -// _(attr, weights) -// _(attr, win_length) -// _(attr, window) -// _(attr, window_length) -// _(attr, with_replacement) -// _(attr, workspace) -// _(attr, wrap) -// _(attr, x) -// _(attr, x1) -// _(attr, x2) -// _(attr, y) -// _(attr, z) -// _(attr, z_state) -// _(attr, zero_infinity) -// _(attr, zero_point) -// _(attr, zero_point_hh) -// _(attr, zero_point_ih) -// _(attr, zero_points)
-
-
-// Parsed from ATen/core/interned_strings.h
-// #pragma once
-// #include -// #include -// #include -// #include -// #include -// #include
+// Does not delete anything
+@Namespace("c10::detail") public static native void deleteNothing(Pointer arg0);
+// Targeting ../UniqueVoidPtr.java
-// #include -// #include
-// #define FORALL_NS_SYMBOLS(_)
-// _(namespaces, prim) -// _(namespaces, prims) -// _(namespaces, nvprims) -// _(namespaces, aten) -// _(namespaces, cuda) -// _(namespaces, onnx) -// _(namespaces, attr) -// _(namespaces, scope) -// _(namespaces, user) -// _(namespaces, _caffe2) -// _(namespaces, dimname) -// _(namespaces, namespaces) -// _(prim, Assign) -// _(prim, BroadcastingChunk) -// _(prim, BroadcastSizes) -// _(prim, ReductionSizes) -// _(prim, Constant) -// _(prim, ChunkSizes) -// _(prim, ConstantMKLDNNTensor) -// _(prim, BroadcastMKLDNNTensors) -// _(prim, MKLDNNGroup) -// _(prim, MKLDNNHardSwish) -// _(prim, MKLDNNHardSigmoid) -// _(prim, MKLDNNHardTanh) -// _(prim, MKLDNNClamp) -// _(prim, StaticRuntimeCopyOuts) -// _(prim, Drop) -// _(prim, Eval) -// _(prim, Expand) /* onnx */ -// _(prim, FusionGroup) -// _(prim, CudaFusionGroup) -// _(prim, CudaFusionGuard) -// _(prim, oneDNNFusionGroup) -// _(prim, oneDNNFusionGuard) -// _(prim, FunctionalGraph) -// _(prim, add_optional) -// _(prim, view_copy) -// _(prim, permute_copy) -// _(prim, reshape_copy) -// _(prim, squeeze_copy) -// _(prim, t_copy) -// _(prim, transpose_copy) -// _(prim, unsqueeze_copy) -// _(prim, flatten_copy) -// _(prim, expand_copy) -// _(prim, expand_as_copy) -// _(prim, DifferentiableGraph) -// _(prim, TensorExprGroup) -//
_(prim, TensorExprDynamicGroup) -// _(prim, StaticSubgraph) -// _(prim, If) -// _(prim, Jump) /* debug */ -// _(prim, JumpNZ) /* debug */ -// _(prim, JumpZ) /* debug */ -// _(prim, Load) -// _(prim, Loop) -// _(prim, Param) -// _(prim, PackPadded) /* onnx */ -// _(prim, PadPacked) /* onnx */ -// _(prim, Placeholder) /* debug */ -// _(prim, Print) -// _(prim, EmptyListLiteral) -// _(prim, LegacyTypedConstructor) -// _(prim, PythonOp) -// _(prim, IgnoredPythonOp) -// _(prim, Reverse) -// _(prim, Return) -// _(prim, ReturnStmt) -// _(prim, BreakStmt) -// _(prim, ContinueStmt) -// _(prim, ComprehensionScope) -// _(prim, Store) -// _(prim, AutogradZero) -// _(prim, AutogradAnyNonZero) -// _(prim, AutogradAllNonZero) -// _(prim, AutogradAllZero) -// _(prim, Starred) -// _(prim, TupleConstruct) -// _(prim, TupleUnpack) -// _(prim, TupleIndex) -// _(prim, TupleSlice) -// _(prim, ListConstruct) -// _(prim, ListUnpack) -// _(prim, DictConstruct) -// _(prim, ModuleContainerIndex) -// _(prim, EnumName) -// _(prim, EnumValue) -// _(prim, StringIndex) -// _(prim, NumToTensor) -// _(prim, Uninitialized) -// _(prim, VarConcat) -// _(prim, VarStack) -// _(prim, With) -// _(prim, Enter) -// _(prim, Exit) -// _(prim, IfThenElse) -// _(aten, Bool) -// _(aten, Int) -// _(aten, FloatImplicit) -// _(aten, ComplexImplicit) -// _(aten, IntImplicit) -// _(aten, ScalarImplicit) -// _(aten, Float) -// _(aten, Complex) -// _(aten, str) -// _(aten, Delete) -// _(prim, device) -// _(prim, dtype) -// _(prim, layout) -// _(prim, id) -// _(prim, requires_grad) -// _(prim, MakeTestTensor) /* test */ -// _(prim, AutogradAdd) -// _(prim, GradOf) -// _(aten, grad) -// _(aten, backward) -// _(prim, Guard) -// _(prim, BailOut) -// _(prim, TypeCheck) -// _(prim, RequiresGradCheck) -// _(prim, FallbackGraph) -// _(prim, FusedConcat) -// _(prim, ConstantChunk) -// _(prim, MMTreeReduce) -// _(prim, MMBatchSide) -// _(prim, list) -// _(prim, dict) -// _(prim, min) -// _(prim, max) -// _(prim, abs) -// _(aten, divmod) -// _(prim, zip) -// _(prim, enumerate) -// _(prim, range) -// _(prim, rangelist) -// _(prim, isinstance) -// _(prim, tolist) -// _(prim, unchecked_cast) -// _(aten, _grad_sum_to_size) -// _(aten, _size_if_not_equal) -// _(aten, _ncf_unsqueeze) -// _(aten, warn) -// _(aten, sorted) -// _(aten, floordiv) -// _(aten, __range_length) -// _(aten, __derive_index) -// _(aten, __round_to_zero_floordiv) -// _(aten, is_scripting) -// _(aten, _unwrap_optional) -// _(prim, fork) -// _(prim, awaitable) -// _(prim, forkClosure) -// _(prim, awaitableClosure) -// _(prim, awaitable_nowait) -// _(prim, awaitable_wait) -// _(prim, RaiseException) -// _(prim, Closure) -// _(prim, CreateObject) -// _(prim, SetAttr) -// _(prim, GetAttr) -// _(prim, HasAttr) -// _(prim, profile) -// _(prim, profile_ivalue) -// _(prim, AddStatValue) -// _(prim, TimePoint) -// _(prim, CallFunction) -// _(prim, CallMethod) -// _(prim, LoopContinuation) -// _(prim, annotate) -// _(prim, TracedModuleForward) -// _(prim, TracedFork) -// _(prim, TracedAttr) -// _(prim, rpc_async) -// _(prim, rpc_sync) -// _(prim, rpc_remote) -// _(prim, is_cuda) -// _(aten, append) -// _(aten, as_tensor) -// _(aten, adaptive_avg_pool2d_backward) -// _(aten, dim) -// _(aten, format) -// _(aten, percentFormat) -// _(aten, __not__) -// _(aten, __is__) -// _(aten, __isnot__) -// _(aten, _ger) -// _(aten, __getitem__) -// _(aten, _set_item) -// _(aten, manual_seed) -// _(aten, device) -// _(aten, hash) -// _(aten, len) -// _(aten, list) -// _(aten, dict) -// _(aten, wait) -// _(aten, 
save) -// _(aten, keys) -// _(aten, ord) -// _(aten, chr) -// _(aten, hex) -// _(aten, oct) -// _(aten, clear) -// _(aten, setdefault) -// _(aten, bin) -// _(aten, pop) -// _(aten, insert) -// _(aten, tensor) -// _(prim, unchecked_unwrap_optional) -// _(aten, __contains__) -// _(prim, BailoutTemplate) -// _(prim, grad) -// _(cuda, _set_device) -// _(cuda, set_stream) -// _(cuda, _current_device) -// _(cuda, synchronize) -// _(aten, has_torch_function) -// _(aten, is_autocast_enabled) -// _(aten, is_autocast_cpu_enabled)
-// FORALL_ATEN_BASE_SYMBOLS(_)
-// _(onnx, Add) -// _(onnx, Concat) -// _(onnx, Constant) -// _(onnx, ConstantFill) -// _(onnx, Div) -// _(onnx, GRU) -// _(onnx, Gather) -// _(onnx, Gemm) -// _(onnx, LSTM) -// _(onnx, MatMul) -// _(onnx, Min) -// _(onnx, Max) -// _(onnx, Mul) -// _(onnx, Pow) -// _(onnx, RNN) -// _(onnx, Shape) -// _(onnx, Size) -// _(onnx, Slice) -// _(onnx, Softmax) -// _(onnx, Squeeze) -// _(onnx, Sub) -// _(onnx, Transpose) -// _(onnx, Unsqueeze) -// _(onnx, Loop) -// _(onnx, If) -// _(onnx, Reshape) -// _(onnx, Expand) -// _(onnx, Equal) -// _(onnx, Greater) -// _(onnx, GreaterOrEqual) -// _(onnx, Less) -// _(onnx, LessOrEqual) -// _(onnx, Not) -// _(aten, ATen) -// _(onnx, Split) -// _(onnx, ConstantOfShape) -// _(onnx, Cast) -// _(onnx, Mod) -// _(onnx, Sqrt) -// _(onnx, SplitToSequence) -// _(onnx, SequenceAt) -// _(onnx, SequenceConstruct) -// _(onnx, SequenceEmpty) -// _(onnx, SequenceInsert) -// _(onnx, SequenceErase) -// _(onnx, ConcatFromSequence) -// _(onnx, Identity) -// _(onnx, SoftmaxCrossEntropyLoss) -// _(onnx, NegativeLogLikelihoodLoss) -// _(onnx, LogSoftmax) -// _(onnx, ReduceL1) -// _(onnx, ReduceL2) -// _(onnx, Conv) -// _(onnx, BatchNormalization) -// _(onnx, ReduceMean) -// _(onnx, ReduceProd) -// _(onnx, Relu) -// _(onnx, Neg) -// _(onnx, NonZero) -// _(onnx, Range) -// _(onnx, Tile) -// _(onnx, Where) -// _(onnx, Optional) -// _(onnx, OptionalGetElement) -// _(onnx, OptionalHasElement)
-// FORALL_ATTR_BASE_SYMBOLS(_)
-// _(attr, Subgraph) -// _(attr, ReverseSubgraph) -// _(attr, f_real_outputs) -// _(attr, df_input_vjps) -// _(attr, df_input_captured_inputs) -// _(attr, df_input_captured_outputs) -// _(attr, df_output_vjps) -// _(attr, axes) -// _(attr, symbolic_shape_inputs) -// _(attr, allow_stack_outputs) -// _(attr, striding_inputs_desc) -// _(attr, striding_outputs_desc) -// _(attr, broadcast) -// _(attr, direction) -// _(attr, ends) -// _(attr, inplace) -// _(attr, input_as_shape) -// _(attr, is_zero) -// _(attr, num_none) -// _(attr, num_present) -// _(attr, perm) -// _(attr, starts) -// _(attr, profiled_type) -// _(attr, transA) -// _(attr, transB) -// _(attr, name) -// _(attr, module) -// _(attr, beg) -// _(attr, idx) -// _(attr, split) -// _(attr, slot) -// _(attr, kinds) -// _(attr, types) -// _(attr, scope) -// _(attr, keepdims) -// _(attr, cache_id) -// _(attr, new_axis) -// _(attr, warn_id) -// _(attr, output_layouts) -// _(attr, allowzero) -// _(attr, seen_none) -// _(attr, overload_name)
-
-@Namespace("c10") public enum _keys {
- namespaces_prim(0), - namespaces_prims(1), - namespaces_nvprims(2), - namespaces_aten(3), - namespaces_cuda(4), - namespaces_onnx(5), - namespaces_attr(6), - namespaces_scope(7), - namespaces_user(8), - namespaces__caffe2(9), - namespaces_dimname(10), - namespaces_namespaces(11), - prim_Assign(12), - prim_BroadcastingChunk(13), - prim_BroadcastSizes(14), - prim_ReductionSizes(15), - prim_Constant(16), - prim_ChunkSizes(17), - prim_ConstantMKLDNNTensor(18), -
prim_BroadcastMKLDNNTensors(19), - prim_MKLDNNGroup(20), - prim_MKLDNNHardSwish(21), - prim_MKLDNNHardSigmoid(22), - prim_MKLDNNHardTanh(23), - prim_MKLDNNClamp(24), - prim_StaticRuntimeCopyOuts(25), - prim_Drop(26), - prim_Eval(27), - prim_Expand(28), /* onnx */ - prim_FusionGroup(29), - prim_CudaFusionGroup(30), - prim_CudaFusionGuard(31), - prim_oneDNNFusionGroup(32), - prim_oneDNNFusionGuard(33), - prim_FunctionalGraph(34), - prim_add_optional(35), - prim_view_copy(36), - prim_permute_copy(37), - prim_reshape_copy(38), - prim_squeeze_copy(39), - prim_t_copy(40), - prim_transpose_copy(41), - prim_unsqueeze_copy(42), - prim_flatten_copy(43), - prim_expand_copy(44), - prim_expand_as_copy(45), - prim_DifferentiableGraph(46), - prim_TensorExprGroup(47), - prim_TensorExprDynamicGroup(48), - prim_StaticSubgraph(49), - prim_If(50), - prim_Jump(51), /* debug */ - prim_JumpNZ(52), /* debug */ - prim_JumpZ(53), /* debug */ - prim_Load(54), - prim_Loop(55), - prim_Param(56), - prim_PackPadded(57), /* onnx */ - prim_PadPacked(58), /* onnx */ - prim_Placeholder(59), /* debug */ - prim_Print(60), - prim_EmptyListLiteral(61), - prim_LegacyTypedConstructor(62), - prim_PythonOp(63), - prim_IgnoredPythonOp(64), - prim_Reverse(65), - prim_Return(66), - prim_ReturnStmt(67), - prim_BreakStmt(68), - prim_ContinueStmt(69), - prim_ComprehensionScope(70), - prim_Store(71), - prim_AutogradZero(72), - prim_AutogradAnyNonZero(73), - prim_AutogradAllNonZero(74), - prim_AutogradAllZero(75), - prim_Starred(76), - prim_TupleConstruct(77), - prim_TupleUnpack(78), - prim_TupleIndex(79), - prim_TupleSlice(80), - prim_ListConstruct(81), - prim_ListUnpack(82), - prim_DictConstruct(83), - prim_ModuleContainerIndex(84), - prim_EnumName(85), - prim_EnumValue(86), - prim_StringIndex(87), - prim_NumToTensor(88), - prim_Uninitialized(89), - prim_VarConcat(90), - prim_VarStack(91), - prim_With(92), - prim_Enter(93), - prim_Exit(94), - prim_IfThenElse(95), - aten_Bool(96), - aten_Int(97), - aten_FloatImplicit(98), - aten_ComplexImplicit(99), - aten_IntImplicit(100), - aten_ScalarImplicit(101), - aten_Float(102), - aten_Complex(103), - aten_str(104), - aten_Delete(105), - prim_device(106), - prim_dtype(107), - prim_layout(108), - prim_id(109), - prim_requires_grad(110), - prim_MakeTestTensor(111), /* test */ - prim_AutogradAdd(112), - prim_GradOf(113), - aten_grad(114), - aten_backward(115), - prim_Guard(116), - prim_BailOut(117), - prim_TypeCheck(118), - prim_RequiresGradCheck(119), - prim_FallbackGraph(120), - prim_FusedConcat(121), - prim_ConstantChunk(122), - prim_MMTreeReduce(123), - prim_MMBatchSide(124), - prim_list(125), - prim_dict(126), - prim_min(127), - prim_max(128), - prim_abs(129), - aten_divmod(130), - prim_zip(131), - prim_enumerate(132), - prim_range(133), - prim_rangelist(134), - prim_isinstance(135), - prim_tolist(136), - prim_unchecked_cast(137), - aten__grad_sum_to_size(138), - aten__size_if_not_equal(139), - aten__ncf_unsqueeze(140), - aten_warn(141), - aten_sorted(142), - aten_floordiv(143), - aten___range_length(144), - aten___derive_index(145), - aten___round_to_zero_floordiv(146), - aten_is_scripting(147), - aten__unwrap_optional(148), - prim_fork(149), - prim_awaitable(150), - prim_forkClosure(151), - prim_awaitableClosure(152), - prim_awaitable_nowait(153), - prim_awaitable_wait(154), - prim_RaiseException(155), - prim_Closure(156), - prim_CreateObject(157), - prim_SetAttr(158), - prim_GetAttr(159), - prim_HasAttr(160), - prim_profile(161), - prim_profile_ivalue(162), - prim_AddStatValue(163), - 
prim_TimePoint(164), - prim_CallFunction(165), - prim_CallMethod(166), - prim_LoopContinuation(167), - prim_annotate(168), - prim_TracedModuleForward(169), - prim_TracedFork(170), - prim_TracedAttr(171), - prim_rpc_async(172), - prim_rpc_sync(173), - prim_rpc_remote(174), - prim_is_cuda(175), - aten_append(176), - aten_as_tensor(177), - aten_adaptive_avg_pool2d_backward(178), - aten_dim(179), - aten_format(180), - aten_percentFormat(181), - aten___not__(182), - aten___is__(183), - aten___isnot__(184), - aten__ger(185), - aten___getitem__(186), - aten__set_item(187), - aten_manual_seed(188), - aten_device(189), - aten_hash(190), - aten_len(191), - aten_list(192), - aten_dict(193), - aten_wait(194), - aten_save(195), - aten_keys(196), - aten_ord(197), - aten_chr(198), - aten_hex(199), - aten_oct(200), - aten_clear(201), - aten_setdefault(202), - aten_bin(203), - aten_pop(204), - aten_insert(205), - aten_tensor(206), - prim_unchecked_unwrap_optional(207), - aten___contains__(208), - prim_BailoutTemplate(209), - prim_grad(210), - cuda__set_device(211), - cuda_set_stream(212), - cuda__current_device(213), - cuda_synchronize(214), - aten_has_torch_function(215), - aten_is_autocast_enabled(216), - aten_is_autocast_cpu_enabled(217), - aten___and__(218), -aten___iand__(219), -aten___ilshift__(220), -aten___ior__(221), -aten___irshift__(222), -aten___ixor__(223), -aten___lshift__(224), -aten___or__(225), -aten___rshift__(226), -aten___xor__(227), -aten__adaptive_avg_pool2d(228), -aten__adaptive_avg_pool2d_backward(229), -aten__adaptive_avg_pool3d(230), -aten__adaptive_avg_pool3d_backward(231), -aten__add_batch_dim(232), -aten__add_relu(233), -aten__add_relu_(234), -aten__addmm_activation(235), -aten__aminmax(236), -aten__amp_foreach_non_finite_check_and_unscale(237), -aten__amp_foreach_non_finite_check_and_unscale_(238), -aten__amp_update_scale(239), -aten__amp_update_scale_(240), -aten__assert_async(241), -aten__assert_tensor_metadata(242), -aten__autocast_to_full_precision(243), -aten__autocast_to_reduced_precision(244), -aten__backward(245), -aten__batch_norm_impl_index(246), -aten__batch_norm_impl_index_backward(247), -aten__cast_Byte(248), -aten__cast_Char(249), -aten__cast_Double(250), -aten__cast_Float(251), -aten__cast_Half(252), -aten__cast_Int(253), -aten__cast_Long(254), -aten__cast_Short(255), -aten__cdist_backward(256), -aten__cdist_forward(257), -aten__cholesky_solve_helper(258), -aten__choose_qparams_per_tensor(259), -aten__chunk_grad_outputs_efficient_attention(260), -aten__coalesce(261), -aten__coalesced(262), -aten__coalesced_(263), -aten__compute_linear_combination(264), -aten__conj(265), -aten__conj_copy(266), -aten__conj_physical(267), -aten__conv_depthwise2d(268), -aten__convert_indices_from_coo_to_csr(269), -aten__convert_indices_from_csr_to_coo(270), -aten__convolution(271), -aten__convolution_double_backward(272), -aten__convolution_mode(273), -aten__copy_from(274), -aten__copy_from_and_resize(275), -aten__ctc_loss(276), -aten__ctc_loss_backward(277), -aten__cudnn_ctc_loss(278), -aten__cudnn_init_dropout_state(279), -aten__cudnn_rnn(280), -aten__cudnn_rnn_backward(281), -aten__cudnn_rnn_flatten_weight(282), -aten__cufft_clear_plan_cache(283), -aten__cufft_get_plan_cache_max_size(284), -aten__cufft_get_plan_cache_size(285), -aten__cufft_set_plan_cache_max_size(286), -aten__cummax_helper(287), -aten__cummin_helper(288), -aten__debug_has_internal_overlap(289), -aten__dimI(290), -aten__dimV(291), -aten__dim_arange(292), -aten__dirichlet_grad(293), 
-aten__efficient_attention_backward(294), -aten__efficient_attention_forward(295), -aten__efficientzerotensor(296), -aten__embedding_bag(297), -aten__embedding_bag_backward(298), -aten__embedding_bag_dense_backward(299), -aten__embedding_bag_forward_only(300), -aten__embedding_bag_per_sample_weights_backward(301), -aten__embedding_bag_sparse_backward(302), -aten__empty_affine_quantized(303), -aten__empty_per_channel_affine_quantized(304), -aten__euclidean_dist(305), -aten__fake_quantize_learnable_per_channel_affine(306), -aten__fake_quantize_learnable_per_channel_affine_backward(307), -aten__fake_quantize_learnable_per_tensor_affine(308), -aten__fake_quantize_learnable_per_tensor_affine_backward(309), -aten__fake_quantize_per_tensor_affine_cachemask_tensor_qparams(310), -aten__fft_c2c(311), -aten__fft_c2r(312), -aten__fft_r2c(313), -aten__flash_attention_backward(314), -aten__flash_attention_forward(315), -aten__foobar(316), -aten__foreach_abs(317), -aten__foreach_abs_(318), -aten__foreach_acos(319), -aten__foreach_acos_(320), -aten__foreach_add(321), -aten__foreach_add_(322), -aten__foreach_addcdiv(323), -aten__foreach_addcdiv_(324), -aten__foreach_addcmul(325), -aten__foreach_addcmul_(326), -aten__foreach_asin(327), -aten__foreach_asin_(328), -aten__foreach_atan(329), -aten__foreach_atan_(330), -aten__foreach_ceil(331), -aten__foreach_ceil_(332), -aten__foreach_clamp_max(333), -aten__foreach_clamp_max_(334), -aten__foreach_clamp_min(335), -aten__foreach_clamp_min_(336), -aten__foreach_cos(337), -aten__foreach_cos_(338), -aten__foreach_cosh(339), -aten__foreach_cosh_(340), -aten__foreach_div(341), -aten__foreach_div_(342), -aten__foreach_erf(343), -aten__foreach_erf_(344), -aten__foreach_erfc(345), -aten__foreach_erfc_(346), -aten__foreach_exp(347), -aten__foreach_exp_(348), -aten__foreach_expm1(349), -aten__foreach_expm1_(350), -aten__foreach_floor(351), -aten__foreach_floor_(352), -aten__foreach_frac(353), -aten__foreach_frac_(354), -aten__foreach_lerp(355), -aten__foreach_lerp_(356), -aten__foreach_lgamma(357), -aten__foreach_lgamma_(358), -aten__foreach_log(359), -aten__foreach_log10(360), -aten__foreach_log10_(361), -aten__foreach_log1p(362), -aten__foreach_log1p_(363), -aten__foreach_log2(364), -aten__foreach_log2_(365), -aten__foreach_log_(366), -aten__foreach_maximum(367), -aten__foreach_maximum_(368), -aten__foreach_minimum(369), -aten__foreach_minimum_(370), -aten__foreach_mul(371), -aten__foreach_mul_(372), -aten__foreach_neg(373), -aten__foreach_neg_(374), -aten__foreach_norm(375), -aten__foreach_reciprocal(376), -aten__foreach_reciprocal_(377), -aten__foreach_round(378), -aten__foreach_round_(379), -aten__foreach_sigmoid(380), -aten__foreach_sigmoid_(381), -aten__foreach_sin(382), -aten__foreach_sin_(383), -aten__foreach_sinh(384), -aten__foreach_sinh_(385), -aten__foreach_sqrt(386), -aten__foreach_sqrt_(387), -aten__foreach_sub(388), -aten__foreach_sub_(389), -aten__foreach_tan(390), -aten__foreach_tan_(391), -aten__foreach_tanh(392), -aten__foreach_tanh_(393), -aten__foreach_trunc(394), -aten__foreach_trunc_(395), -aten__foreach_zero(396), -aten__foreach_zero_(397), -aten__fused_adam(398), -aten__fused_adam_(399), -aten__fused_adamw(400), -aten__fused_adamw_(401), -aten__fused_dropout(402), -aten__fused_moving_avg_obs_fq_helper(403), -aten__fused_moving_avg_obs_fq_helper_functional(404), -aten__fused_sdp_choice(405), -aten__fw_primal(406), -aten__fw_primal_copy(407), -aten__gather_sparse_backward(408), -aten__grid_sampler_2d_cpu_fallback(409), 
-aten__grid_sampler_2d_cpu_fallback_backward(410), -aten__has_compatible_shallow_copy_type(411), -aten__has_same_storage_numel(412), -aten__histogramdd_bin_edges(413), -aten__histogramdd_from_bin_cts(414), -aten__histogramdd_from_bin_tensors(415), -aten__index_put_impl(416), -aten__index_put_impl_(417), -aten__indices(418), -aten__indices_copy(419), -aten__is_all_true(420), -aten__is_any_true(421), -aten__is_zerotensor(422), -aten__linalg_check_errors(423), -aten__linalg_det(424), -aten__linalg_eigh(425), -aten__linalg_slogdet(426), -aten__linalg_solve_ex(427), -aten__linalg_svd(428), -aten__local_scalar_dense(429), -aten__log_softmax(430), -aten__log_softmax_backward_data(431), -aten__logcumsumexp(432), -aten__lstm_mps(433), -aten__lu_with_info(434), -aten__make_dual(435), -aten__make_dual_copy(436), -aten__make_per_channel_quantized_tensor(437), -aten__make_per_tensor_quantized_tensor(438), -aten__masked_scale(439), -aten__masked_softmax(440), -aten__masked_softmax_backward(441), -aten__mkldnn_reshape(442), -aten__mkldnn_transpose(443), -aten__mkldnn_transpose_(444), -aten__mps_convolution(445), -aten__mps_convolution_transpose(446), -aten__native_batch_norm_legit(447), -aten__native_batch_norm_legit_functional(448), -aten__native_decoder_only_multi_head_attention(449), -aten__native_multi_head_attention(450), -aten__neg_view(451), -aten__neg_view_copy(452), -aten__nested_from_padded(453), -aten__nested_from_padded_and_nested_example(454), -aten__nested_select_backward(455), -aten__nested_sum_backward(456), -aten__nested_tensor_from_mask(457), -aten__nested_tensor_from_mask_left_aligned(458), -aten__nested_tensor_from_tensor_list(459), -aten__nested_tensor_offsets(460), -aten__nested_tensor_size(461), -aten__nested_tensor_softmax_with_shape(462), -aten__nested_tensor_strides(463), -aten__nested_view_from_buffer(464), -aten__nested_view_from_buffer_copy(465), -aten__new_zeros_with_same_feature_meta(466), -aten__nnpack_available(467), -aten__nnpack_spatial_convolution(468), -aten__nnz(469), -aten__pack_padded_sequence(470), -aten__pack_padded_sequence_backward(471), -aten__pad_circular(472), -aten__pad_enum(473), -aten__pad_packed_sequence(474), -aten__pdist_backward(475), -aten__pdist_forward(476), -aten__pin_memory(477), -aten__prelu_kernel(478), -aten__prelu_kernel_backward(479), -aten__remove_batch_dim(480), -aten__reshape_alias(481), -aten__reshape_alias_copy(482), -aten__reshape_copy(483), -aten__reshape_from_tensor(484), -aten__resize_output(485), -aten__resize_output_(486), -aten__rowwise_prune(487), -aten__sample_dirichlet(488), -aten__saturate_weight_to_fp16(489), -aten__scaled_dot_product_attention(490), -aten__scaled_dot_product_attention_math(491), -aten__scaled_dot_product_efficient_attention(492), -aten__scaled_dot_product_efficient_attention_backward(493), -aten__scaled_dot_product_flash_attention(494), -aten__scaled_dot_product_flash_attention_backward(495), -aten__segment_reduce_backward(496), -aten__shape_as_tensor(497), -aten__slow_conv2d_backward(498), -aten__slow_conv2d_forward(499), -aten__sobol_engine_draw(500), -aten__sobol_engine_ff(501), -aten__sobol_engine_ff_(502), -aten__sobol_engine_initialize_state(503), -aten__sobol_engine_initialize_state_(504), -aten__sobol_engine_scramble(505), -aten__sobol_engine_scramble_(506), -aten__softmax(507), -aten__softmax_backward_data(508), -aten__sparse_addmm(509), -aten__sparse_broadcast_to(510), -aten__sparse_broadcast_to_copy(511), -aten__sparse_bsc_tensor_unsafe(512), -aten__sparse_bsr_tensor_unsafe(513), 
-aten__sparse_compressed_tensor_unsafe(514), -aten__sparse_coo_tensor_unsafe(515), -aten__sparse_coo_tensor_with_dims(516), -aten__sparse_coo_tensor_with_dims_and_tensors(517), -aten__sparse_csc_tensor_unsafe(518), -aten__sparse_csr_prod(519), -aten__sparse_csr_sum(520), -aten__sparse_csr_tensor_unsafe(521), -aten__sparse_log_softmax(522), -aten__sparse_log_softmax_backward_data(523), -aten__sparse_mm(524), -aten__sparse_mm_reduce_impl(525), -aten__sparse_mm_reduce_impl_backward(526), -aten__sparse_softmax(527), -aten__sparse_softmax_backward_data(528), -aten__sparse_sparse_matmul(529), -aten__sparse_sum(530), -aten__sparse_sum_backward(531), -aten__spdiags(532), -aten__stack(533), -aten__standard_gamma(534), -aten__standard_gamma_grad(535), -aten__test_ambiguous_defaults(536), -aten__test_autograd_multiple_dispatch(537), -aten__test_autograd_multiple_dispatch_view(538), -aten__test_autograd_multiple_dispatch_view_copy(539), -aten__test_check_tensor(540), -aten__test_optional_filled_intlist(541), -aten__test_optional_floatlist(542), -aten__test_optional_intlist(543), -aten__test_serialization_subcmul(544), -aten__test_string_default(545), -aten__test_warn_in_autograd(546), -aten__thnn_differentiable_gru_cell_backward(547), -aten__thnn_differentiable_lstm_cell_backward(548), -aten__thnn_fused_gru_cell(549), -aten__thnn_fused_gru_cell_backward(550), -aten__thnn_fused_lstm_cell(551), -aten__thnn_fused_lstm_cell_backward(552), -aten__thnn_fused_lstm_cell_backward_impl(553), -aten__to_copy(554), -aten__to_cpu(555), -aten__to_dense(556), -aten__transform_bias_rescale_qkv(557), -aten__transformer_decoder_only_layer_fwd(558), -aten__transformer_encoder_layer_fwd(559), -aten__trilinear(560), -aten__triton_multi_head_attention(561), -aten__triton_scaled_dot_attention(562), -aten__unique(563), -aten__unique2(564), -aten__unpack_dual(565), -aten__unsafe_view(566), -aten__upsample_bicubic2d_aa(567), -aten__upsample_bicubic2d_aa_backward(568), -aten__upsample_bilinear2d_aa(569), -aten__upsample_bilinear2d_aa_backward(570), -aten__upsample_nearest_exact1d(571), -aten__upsample_nearest_exact1d_backward(572), -aten__upsample_nearest_exact2d(573), -aten__upsample_nearest_exact2d_backward(574), -aten__upsample_nearest_exact3d(575), -aten__upsample_nearest_exact3d_backward(576), -aten__use_cudnn_ctc_loss(577), -aten__use_cudnn_rnn_flatten_weight(578), -aten__validate_compressed_sparse_indices(579), -aten__validate_sparse_bsc_tensor_args(580), -aten__validate_sparse_bsr_tensor_args(581), -aten__validate_sparse_compressed_tensor_args(582), -aten__validate_sparse_coo_tensor_args(583), -aten__validate_sparse_csc_tensor_args(584), -aten__validate_sparse_csr_tensor_args(585), -aten__values(586), -aten__values_copy(587), -aten__version(588), -aten__weight_norm(589), -aten__weight_norm_differentiable_backward(590), -aten__weight_norm_interface(591), -aten__weight_norm_interface_backward(592), -aten_abs(593), -aten_abs_(594), -aten_absolute(595), -aten_absolute_(596), -aten_acos(597), -aten_acos_(598), -aten_acosh(599), -aten_acosh_(600), -aten_adaptive_avg_pool1d(601), -aten_adaptive_avg_pool2d(602), -aten_adaptive_avg_pool3d(603), -aten_adaptive_avg_pool3d_backward(604), -aten_adaptive_max_pool1d(605), -aten_adaptive_max_pool2d(606), -aten_adaptive_max_pool2d_backward(607), -aten_adaptive_max_pool3d(608), -aten_adaptive_max_pool3d_backward(609), -aten_add(610), -aten_add_(611), -aten_addbmm(612), -aten_addbmm_(613), -aten_addcdiv(614), -aten_addcdiv_(615), -aten_addcmul(616), -aten_addcmul_(617), -aten_addmm(618), 
-aten_addmm_(619), -aten_addmv(620), -aten_addmv_(621), -aten_addr(622), -aten_addr_(623), -aten_adjoint(624), -aten_affine_grid_generator(625), -aten_affine_grid_generator_backward(626), -aten_alias(627), -aten_alias_copy(628), -aten_align_as(629), -aten_align_tensors(630), -aten_align_to(631), -aten_all(632), -aten_allclose(633), -aten_alpha_dropout(634), -aten_alpha_dropout_(635), -aten_amax(636), -aten_amin(637), -aten_aminmax(638), -aten_angle(639), -aten_any(640), -aten_arange(641), -aten_arccos(642), -aten_arccos_(643), -aten_arccosh(644), -aten_arccosh_(645), -aten_arcsin(646), -aten_arcsin_(647), -aten_arcsinh(648), -aten_arcsinh_(649), -aten_arctan(650), -aten_arctan2(651), -aten_arctan2_(652), -aten_arctan_(653), -aten_arctanh(654), -aten_arctanh_(655), -aten_argmax(656), -aten_argmin(657), -aten_argsort(658), -aten_argwhere(659), -aten_as_strided(660), -aten_as_strided_(661), -aten_as_strided_copy(662), -aten_as_strided_scatter(663), -aten_asin(664), -aten_asin_(665), -aten_asinh(666), -aten_asinh_(667), -aten_atan(668), -aten_atan2(669), -aten_atan2_(670), -aten_atan_(671), -aten_atanh(672), -aten_atanh_(673), -aten_atleast_1d(674), -aten_atleast_2d(675), -aten_atleast_3d(676), -aten_avg_pool1d(677), -aten_avg_pool2d(678), -aten_avg_pool2d_backward(679), -aten_avg_pool3d(680), -aten_avg_pool3d_backward(681), -aten_baddbmm(682), -aten_baddbmm_(683), -aten_bartlett_window(684), -aten_batch_norm(685), -aten_batch_norm_backward_elemt(686), -aten_batch_norm_backward_reduce(687), -aten_batch_norm_elemt(688), -aten_batch_norm_gather_stats(689), -aten_batch_norm_gather_stats_with_counts(690), -aten_batch_norm_stats(691), -aten_batch_norm_update_stats(692), -aten_bernoulli(693), -aten_bernoulli_(694), -aten_bilinear(695), -aten_binary_cross_entropy(696), -aten_binary_cross_entropy_backward(697), -aten_binary_cross_entropy_with_logits(698), -aten_bincount(699), -aten_binomial(700), -aten_bitwise_and(701), -aten_bitwise_and_(702), -aten_bitwise_left_shift(703), -aten_bitwise_left_shift_(704), -aten_bitwise_not(705), -aten_bitwise_not_(706), -aten_bitwise_or(707), -aten_bitwise_or_(708), -aten_bitwise_right_shift(709), -aten_bitwise_right_shift_(710), -aten_bitwise_xor(711), -aten_bitwise_xor_(712), -aten_blackman_window(713), -aten_block_diag(714), -aten_bmm(715), -aten_broadcast_tensors(716), -aten_broadcast_to(717), -aten_bucketize(718), -aten_can_cast(719), -aten_cartesian_prod(720), -aten_cat(721), -aten_cauchy(722), -aten_cauchy_(723), -aten_ccol_indices(724), -aten_ccol_indices_copy(725), -aten_cdist(726), -aten_ceil(727), -aten_ceil_(728), -aten_celu(729), -aten_celu_(730), -aten_chain_matmul(731), -aten_chalf(732), -aten_channel_shuffle(733), -aten_cholesky(734), -aten_cholesky_inverse(735), -aten_cholesky_solve(736), -aten_choose_qparams_optimized(737), -aten_chunk(738), -aten_clamp(739), -aten_clamp_(740), -aten_clamp_max(741), -aten_clamp_max_(742), -aten_clamp_min(743), -aten_clamp_min_(744), -aten_clip(745), -aten_clip_(746), -aten_clone(747), -aten_coalesce(748), -aten_col2im(749), -aten_col_indices(750), -aten_col_indices_copy(751), -aten_column_stack(752), -aten_combinations(753), -aten_complex(754), -aten_concat(755), -aten_concatenate(756), -aten_conj(757), -aten_conj_physical(758), -aten_conj_physical_(759), -aten_constant_pad_nd(760), -aten_contiguous(761), -aten_conv1d(762), -aten_conv2d(763), -aten_conv3d(764), -aten_conv_depthwise3d(765), -aten_conv_tbc(766), -aten_conv_tbc_backward(767), -aten_conv_transpose1d(768), -aten_conv_transpose2d(769), 
-aten_conv_transpose3d(770), -aten_convolution(771), -aten_convolution_backward(772), -aten_convolution_backward_overrideable(773), -aten_convolution_overrideable(774), -aten_copy(775), -aten_copy_(776), -aten_copy_sparse_to_sparse(777), -aten_copy_sparse_to_sparse_(778), -aten_copysign(779), -aten_copysign_(780), -aten_corrcoef(781), -aten_cos(782), -aten_cos_(783), -aten_cosh(784), -aten_cosh_(785), -aten_cosine_embedding_loss(786), -aten_cosine_similarity(787), -aten_count_nonzero(788), -aten_cov(789), -aten_cross(790), -aten_cross_entropy_loss(791), -aten_crow_indices(792), -aten_crow_indices_copy(793), -aten_ctc_loss(794), -aten_cudnn_affine_grid_generator(795), -aten_cudnn_affine_grid_generator_backward(796), -aten_cudnn_batch_norm(797), -aten_cudnn_batch_norm_backward(798), -aten_cudnn_convolution(799), -aten_cudnn_convolution_add_relu(800), -aten_cudnn_convolution_relu(801), -aten_cudnn_convolution_transpose(802), -aten_cudnn_grid_sampler(803), -aten_cudnn_grid_sampler_backward(804), -aten_cudnn_is_acceptable(805), -aten_cummax(806), -aten_cummaxmin_backward(807), -aten_cummin(808), -aten_cumprod(809), -aten_cumprod_(810), -aten_cumprod_backward(811), -aten_cumsum(812), -aten_cumsum_(813), -aten_cumulative_trapezoid(814), -aten_data(815), -aten_deg2rad(816), -aten_deg2rad_(817), -aten_dense_dim(818), -aten_dequantize(819), -aten_det(820), -aten_detach(821), -aten_detach_(822), -aten_detach_copy(823), -aten_diag(824), -aten_diag_embed(825), -aten_diagflat(826), -aten_diagonal(827), -aten_diagonal_backward(828), -aten_diagonal_copy(829), -aten_diagonal_scatter(830), -aten_diff(831), -aten_digamma(832), -aten_digamma_(833), -aten_dist(834), -aten_div(835), -aten_div_(836), -aten_divide(837), -aten_divide_(838), -aten_dot(839), -aten_dropout(840), -aten_dropout_(841), -aten_dsplit(842), -aten_dstack(843), -aten_einsum(844), -aten_elu(845), -aten_elu_(846), -aten_elu_backward(847), -aten_embedding(848), -aten_embedding_backward(849), -aten_embedding_bag(850), -aten_embedding_dense_backward(851), -aten_embedding_renorm(852), -aten_embedding_renorm_(853), -aten_embedding_sparse_backward(854), -aten_empty(855), -aten_empty_like(856), -aten_empty_quantized(857), -aten_empty_strided(858), -aten_eq(859), -aten_eq_(860), -aten_equal(861), -aten_erf(862), -aten_erf_(863), -aten_erfc(864), -aten_erfc_(865), -aten_erfinv(866), -aten_erfinv_(867), -aten_exp(868), -aten_exp2(869), -aten_exp2_(870), -aten_exp_(871), -aten_expand(872), -aten_expand_as(873), -aten_expand_copy(874), -aten_expm1(875), -aten_expm1_(876), -aten_exponential(877), -aten_exponential_(878), -aten_eye(879), -aten_fake_quantize_per_channel_affine(880), -aten_fake_quantize_per_channel_affine_cachemask(881), -aten_fake_quantize_per_channel_affine_cachemask_backward(882), -aten_fake_quantize_per_tensor_affine(883), -aten_fake_quantize_per_tensor_affine_cachemask(884), -aten_fake_quantize_per_tensor_affine_cachemask_backward(885), -aten_fbgemm_linear_fp16_weight(886), -aten_fbgemm_linear_fp16_weight_fp32_activation(887), -aten_fbgemm_linear_int8_weight(888), -aten_fbgemm_linear_int8_weight_fp32_activation(889), -aten_fbgemm_linear_quantize_weight(890), -aten_fbgemm_pack_gemm_matrix_fp16(891), -aten_fbgemm_pack_quantized_matrix(892), -aten_feature_alpha_dropout(893), -aten_feature_alpha_dropout_(894), -aten_feature_dropout(895), -aten_feature_dropout_(896), -aten_fft_fft(897), -aten_fft_fft2(898), -aten_fft_fftfreq(899), -aten_fft_fftn(900), -aten_fft_fftshift(901), -aten_fft_hfft(902), -aten_fft_hfft2(903), -aten_fft_hfftn(904), 
-aten_fft_ifft(905), -aten_fft_ifft2(906), -aten_fft_ifftn(907), -aten_fft_ifftshift(908), -aten_fft_ihfft(909), -aten_fft_ihfft2(910), -aten_fft_ihfftn(911), -aten_fft_irfft(912), -aten_fft_irfft2(913), -aten_fft_irfftn(914), -aten_fft_rfft(915), -aten_fft_rfft2(916), -aten_fft_rfftfreq(917), -aten_fft_rfftn(918), -aten_fill(919), -aten_fill_(920), -aten_fill_diagonal(921), -aten_fill_diagonal_(922), -aten_fix(923), -aten_fix_(924), -aten_flatten(925), -aten_flatten_dense_tensors(926), -aten_flip(927), -aten_fliplr(928), -aten_flipud(929), -aten_float_power(930), -aten_float_power_(931), -aten_floor(932), -aten_floor_(933), -aten_floor_divide(934), -aten_floor_divide_(935), -aten_fmax(936), -aten_fmin(937), -aten_fmod(938), -aten_fmod_(939), -aten_frac(940), -aten_frac_(941), -aten_fractional_max_pool2d(942), -aten_fractional_max_pool2d_backward(943), -aten_fractional_max_pool3d(944), -aten_fractional_max_pool3d_backward(945), -aten_frexp(946), -aten_frobenius_norm(947), -aten_from_file(948), -aten_full(949), -aten_full_like(950), -aten_fused_moving_avg_obs_fake_quant(951), -aten_gather(952), -aten_gather_backward(953), -aten_gcd(954), -aten_gcd_(955), -aten_ge(956), -aten_ge_(957), -aten_gelu(958), -aten_gelu_(959), -aten_gelu_backward(960), -aten_geometric(961), -aten_geometric_(962), -aten_geqrf(963), -aten_ger(964), -aten_glu(965), -aten_glu_backward(966), -aten_glu_backward_jvp(967), -aten_glu_jvp(968), -aten_gradient(969), -aten_greater(970), -aten_greater_(971), -aten_greater_equal(972), -aten_greater_equal_(973), -aten_grid_sampler(974), -aten_grid_sampler_2d(975), -aten_grid_sampler_2d_backward(976), -aten_grid_sampler_3d(977), -aten_grid_sampler_3d_backward(978), -aten_group_norm(979), -aten_gru(980), -aten_gru_cell(981), -aten_gt(982), -aten_gt_(983), -aten_hamming_window(984), -aten_hann_window(985), -aten_hardshrink(986), -aten_hardshrink_backward(987), -aten_hardsigmoid(988), -aten_hardsigmoid_(989), -aten_hardsigmoid_backward(990), -aten_hardswish(991), -aten_hardswish_(992), -aten_hardswish_backward(993), -aten_hardtanh(994), -aten_hardtanh_(995), -aten_hardtanh_backward(996), -aten_heaviside(997), -aten_heaviside_(998), -aten_hinge_embedding_loss(999), -aten_histc(1000), -aten_histogram(1001), -aten_histogramdd(1002), -aten_hsplit(1003), -aten_hspmm(1004), -aten_hstack(1005), -aten_huber_loss(1006), -aten_huber_loss_backward(1007), -aten_hypot(1008), -aten_hypot_(1009), -aten_i0(1010), -aten_i0_(1011), -aten_igamma(1012), -aten_igamma_(1013), -aten_igammac(1014), -aten_igammac_(1015), -aten_im2col(1016), -aten_imag(1017), -aten_index(1018), -aten_index_add(1019), -aten_index_add_(1020), -aten_index_copy(1021), -aten_index_copy_(1022), -aten_index_fill(1023), -aten_index_fill_(1024), -aten_index_put(1025), -aten_index_put_(1026), -aten_index_reduce(1027), -aten_index_reduce_(1028), -aten_index_select(1029), -aten_index_select_backward(1030), -aten_indices(1031), -aten_indices_copy(1032), -aten_infinitely_differentiable_gelu_backward(1033), -aten_inner(1034), -aten_instance_norm(1035), -aten_int_repr(1036), -aten_inverse(1037), -aten_is_coalesced(1038), -aten_is_complex(1039), -aten_is_conj(1040), -aten_is_distributed(1041), -aten_is_floating_point(1042), -aten_is_inference(1043), -aten_is_leaf(1044), -aten_is_neg(1045), -aten_is_nonzero(1046), -aten_is_pinned(1047), -aten_is_same_size(1048), -aten_is_set_to(1049), -aten_is_signed(1050), -aten_is_vulkan_available(1051), -aten_isclose(1052), -aten_isfinite(1053), -aten_isin(1054), -aten_isinf(1055), -aten_isnan(1056), 
-aten_isneginf(1057), -aten_isposinf(1058), -aten_isreal(1059), -aten_istft(1060), -aten_item(1061), -aten_kaiser_window(1062), -aten_kl_div(1063), -aten_kron(1064), -aten_kthvalue(1065), -aten_l1_loss(1066), -aten_layer_norm(1067), -aten_lcm(1068), -aten_lcm_(1069), -aten_ldexp(1070), -aten_ldexp_(1071), -aten_le(1072), -aten_le_(1073), -aten_leaky_relu(1074), -aten_leaky_relu_(1075), -aten_leaky_relu_backward(1076), -aten_lerp(1077), -aten_lerp_(1078), -aten_less(1079), -aten_less_(1080), -aten_less_equal(1081), -aten_less_equal_(1082), -aten_lgamma(1083), -aten_lgamma_(1084), -aten_lift(1085), -aten_lift_fresh(1086), -aten_lift_fresh_copy(1087), -aten_linalg_cholesky(1088), -aten_linalg_cholesky_ex(1089), -aten_linalg_cond(1090), -aten_linalg_cross(1091), -aten_linalg_det(1092), -aten_linalg_diagonal(1093), -aten_linalg_eig(1094), -aten_linalg_eigh(1095), -aten_linalg_eigvals(1096), -aten_linalg_eigvalsh(1097), -aten_linalg_householder_product(1098), -aten_linalg_inv(1099), -aten_linalg_inv_ex(1100), -aten_linalg_ldl_factor(1101), -aten_linalg_ldl_factor_ex(1102), -aten_linalg_ldl_solve(1103), -aten_linalg_lstsq(1104), -aten_linalg_lu(1105), -aten_linalg_lu_factor(1106), -aten_linalg_lu_factor_ex(1107), -aten_linalg_lu_solve(1108), -aten_linalg_matmul(1109), -aten_linalg_matrix_exp(1110), -aten_linalg_matrix_norm(1111), -aten_linalg_matrix_power(1112), -aten_linalg_matrix_rank(1113), -aten_linalg_multi_dot(1114), -aten_linalg_norm(1115), -aten_linalg_pinv(1116), -aten_linalg_qr(1117), -aten_linalg_slogdet(1118), -aten_linalg_solve(1119), -aten_linalg_solve_ex(1120), -aten_linalg_solve_triangular(1121), -aten_linalg_svd(1122), -aten_linalg_svdvals(1123), -aten_linalg_tensorinv(1124), -aten_linalg_tensorsolve(1125), -aten_linalg_vander(1126), -aten_linalg_vecdot(1127), -aten_linalg_vector_norm(1128), -aten_linear(1129), -aten_linear_backward(1130), -aten_linspace(1131), -aten_log(1132), -aten_log10(1133), -aten_log10_(1134), -aten_log1p(1135), -aten_log1p_(1136), -aten_log2(1137), -aten_log2_(1138), -aten_log_(1139), -aten_log_normal(1140), -aten_log_normal_(1141), -aten_log_sigmoid(1142), -aten_log_sigmoid_backward(1143), -aten_log_sigmoid_forward(1144), -aten_log_softmax(1145), -aten_logaddexp(1146), -aten_logaddexp2(1147), -aten_logcumsumexp(1148), -aten_logdet(1149), -aten_logical_and(1150), -aten_logical_and_(1151), -aten_logical_not(1152), -aten_logical_not_(1153), -aten_logical_or(1154), -aten_logical_or_(1155), -aten_logical_xor(1156), -aten_logical_xor_(1157), -aten_logit(1158), -aten_logit_(1159), -aten_logit_backward(1160), -aten_logspace(1161), -aten_logsumexp(1162), -aten_lshift(1163), -aten_lstm(1164), -aten_lstm_cell(1165), -aten_lstm_mps_backward(1166), -aten_lt(1167), -aten_lt_(1168), -aten_lu_solve(1169), -aten_lu_unpack(1170), -aten_mH(1171), -aten_mT(1172), -aten_margin_ranking_loss(1173), -aten_masked_fill(1174), -aten_masked_fill_(1175), -aten_masked_scatter(1176), -aten_masked_scatter_(1177), -aten_masked_select(1178), -aten_masked_select_backward(1179), -aten_matmul(1180), -aten_matmul_backward(1181), -aten_matrix_H(1182), -aten_matrix_exp(1183), -aten_matrix_exp_backward(1184), -aten_matrix_power(1185), -aten_max(1186), -aten_max_pool1d(1187), -aten_max_pool1d_with_indices(1188), -aten_max_pool2d(1189), -aten_max_pool2d_backward(1190), -aten_max_pool2d_with_indices(1191), -aten_max_pool2d_with_indices_backward(1192), -aten_max_pool3d(1193), -aten_max_pool3d_with_indices(1194), -aten_max_pool3d_with_indices_backward(1195), -aten_max_unpool2d(1196), 
-aten_max_unpool3d(1197), -aten_maximum(1198), -aten_mean(1199), -aten_median(1200), -aten_meshgrid(1201), -aten_min(1202), -aten_minimum(1203), -aten_miopen_batch_norm(1204), -aten_miopen_batch_norm_backward(1205), -aten_miopen_convolution(1206), -aten_miopen_convolution_add_relu(1207), -aten_miopen_convolution_relu(1208), -aten_miopen_convolution_transpose(1209), -aten_miopen_depthwise_convolution(1210), -aten_miopen_rnn(1211), -aten_miopen_rnn_backward(1212), -aten_mish(1213), -aten_mish_(1214), -aten_mish_backward(1215), -aten_mkldnn_adaptive_avg_pool2d(1216), -aten_mkldnn_adaptive_avg_pool2d_backward(1217), -aten_mkldnn_convolution(1218), -aten_mkldnn_linear(1219), -aten_mkldnn_linear_backward(1220), -aten_mkldnn_linear_backward_input(1221), -aten_mkldnn_linear_backward_weights(1222), -aten_mkldnn_max_pool2d(1223), -aten_mkldnn_max_pool2d_backward(1224), -aten_mkldnn_max_pool3d(1225), -aten_mkldnn_max_pool3d_backward(1226), -aten_mkldnn_reorder_conv2d_weight(1227), -aten_mkldnn_reorder_conv3d_weight(1228), -aten_mkldnn_rnn_layer(1229), -aten_mkldnn_rnn_layer_backward(1230), -aten_mm(1231), -aten_mode(1232), -aten_moveaxis(1233), -aten_movedim(1234), -aten_mps_convolution_backward(1235), -aten_mps_convolution_transpose_backward(1236), -aten_mse_loss(1237), -aten_mse_loss_backward(1238), -aten_msort(1239), -aten_mul(1240), -aten_mul_(1241), -aten_multi_margin_loss(1242), -aten_multi_margin_loss_backward(1243), -aten_multilabel_margin_loss(1244), -aten_multilabel_margin_loss_backward(1245), -aten_multilabel_margin_loss_forward(1246), -aten_multinomial(1247), -aten_multiply(1248), -aten_multiply_(1249), -aten_mv(1250), -aten_mvlgamma(1251), -aten_mvlgamma_(1252), -aten_nan_to_num(1253), -aten_nan_to_num_(1254), -aten_nanmean(1255), -aten_nanmedian(1256), -aten_nanquantile(1257), -aten_nansum(1258), -aten_narrow(1259), -aten_narrow_copy(1260), -aten_native_batch_norm(1261), -aten_native_batch_norm_backward(1262), -aten_native_channel_shuffle(1263), -aten_native_dropout(1264), -aten_native_dropout_backward(1265), -aten_native_group_norm(1266), -aten_native_group_norm_backward(1267), -aten_native_layer_norm(1268), -aten_native_layer_norm_backward(1269), -aten_native_norm(1270), -aten_ne(1271), -aten_ne_(1272), -aten_neg(1273), -aten_neg_(1274), -aten_negative(1275), -aten_negative_(1276), -aten_nested_to_padded_tensor(1277), -aten_new_empty(1278), -aten_new_empty_strided(1279), -aten_new_full(1280), -aten_new_ones(1281), -aten_new_zeros(1282), -aten_nextafter(1283), -aten_nextafter_(1284), -aten_nll_loss(1285), -aten_nll_loss2d(1286), -aten_nll_loss2d_backward(1287), -aten_nll_loss2d_forward(1288), -aten_nll_loss_backward(1289), -aten_nll_loss_forward(1290), -aten_nll_loss_nd(1291), -aten_nonzero(1292), -aten_nonzero_numpy(1293), -aten_norm(1294), -aten_norm_except_dim(1295), -aten_normal(1296), -aten_normal_(1297), -aten_normal_functional(1298), -aten_not_equal(1299), -aten_not_equal_(1300), -aten_nuclear_norm(1301), -aten_numpy_T(1302), -aten_one_hot(1303), -aten_ones(1304), -aten_ones_like(1305), -aten_orgqr(1306), -aten_ormqr(1307), -aten_outer(1308), -aten_output_nr(1309), -aten_pad(1310), -aten_pad_sequence(1311), -aten_pairwise_distance(1312), -aten_pdist(1313), -aten_permute(1314), -aten_permute_copy(1315), -aten_pin_memory(1316), -aten_pinverse(1317), -aten_pixel_shuffle(1318), -aten_pixel_unshuffle(1319), -aten_poisson(1320), -aten_poisson_nll_loss(1321), -aten_polar(1322), -aten_polygamma(1323), -aten_polygamma_(1324), -aten_positive(1325), -aten_pow(1326), -aten_pow_(1327), 
-aten_prelu(1328), -aten_prod(1329), -aten_promote_types(1330), -aten_put(1331), -aten_put_(1332), -aten_q_per_channel_axis(1333), -aten_q_per_channel_scales(1334), -aten_q_per_channel_zero_points(1335), -aten_q_scale(1336), -aten_q_zero_point(1337), -aten_qr(1338), -aten_qscheme(1339), -aten_quantile(1340), -aten_quantize_per_channel(1341), -aten_quantize_per_tensor(1342), -aten_quantize_per_tensor_dynamic(1343), -aten_quantized_batch_norm(1344), -aten_quantized_gru_cell(1345), -aten_quantized_lstm_cell(1346), -aten_quantized_max_pool1d(1347), -aten_quantized_max_pool2d(1348), -aten_quantized_rnn_relu_cell(1349), -aten_quantized_rnn_tanh_cell(1350), -aten_rad2deg(1351), -aten_rad2deg_(1352), -aten_rand(1353), -aten_rand_like(1354), -aten_randint(1355), -aten_randint_like(1356), -aten_randn(1357), -aten_randn_like(1358), -aten_random(1359), -aten_random_(1360), -aten_randperm(1361), -aten_range(1362), -aten_ravel(1363), -aten_real(1364), -aten_reciprocal(1365), -aten_reciprocal_(1366), -aten_record_stream(1367), -aten_refine_names(1368), -aten_reflection_pad1d(1369), -aten_reflection_pad1d_backward(1370), -aten_reflection_pad2d(1371), -aten_reflection_pad2d_backward(1372), -aten_reflection_pad3d(1373), -aten_reflection_pad3d_backward(1374), -aten_relu(1375), -aten_relu6(1376), -aten_relu6_(1377), -aten_relu_(1378), -aten_remainder(1379), -aten_remainder_(1380), -aten_rename(1381), -aten_rename_(1382), -aten_renorm(1383), -aten_renorm_(1384), -aten_repeat(1385), -aten_repeat_interleave(1386), -aten_replication_pad1d(1387), -aten_replication_pad1d_backward(1388), -aten_replication_pad2d(1389), -aten_replication_pad2d_backward(1390), -aten_replication_pad3d(1391), -aten_replication_pad3d_backward(1392), -aten_requires_grad(1393), -aten_requires_grad_(1394), -aten_reshape(1395), -aten_reshape_as(1396), -aten_resize(1397), -aten_resize_(1398), -aten_resize_as(1399), -aten_resize_as_(1400), -aten_resize_as_sparse(1401), -aten_resize_as_sparse_(1402), -aten_resolve_conj(1403), -aten_resolve_neg(1404), -aten_result_type(1405), -aten_retain_grad(1406), -aten_retains_grad(1407), -aten_rnn_relu(1408), -aten_rnn_relu_cell(1409), -aten_rnn_tanh(1410), -aten_rnn_tanh_cell(1411), -aten_roll(1412), -aten_rot90(1413), -aten_round(1414), -aten_round_(1415), -aten_row_indices(1416), -aten_row_indices_copy(1417), -aten_row_stack(1418), -aten_rrelu(1419), -aten_rrelu_(1420), -aten_rrelu_with_noise(1421), -aten_rrelu_with_noise_(1422), -aten_rrelu_with_noise_backward(1423), -aten_rshift(1424), -aten_rsqrt(1425), -aten_rsqrt_(1426), -aten_rsub(1427), -aten_scalar_tensor(1428), -aten_scaled_dot_product_attention(1429), -aten_scatter(1430), -aten_scatter_(1431), -aten_scatter_add(1432), -aten_scatter_add_(1433), -aten_scatter_reduce(1434), -aten_scatter_reduce_(1435), -aten_searchsorted(1436), -aten_segment_reduce(1437), -aten_select(1438), -aten_select_backward(1439), -aten_select_copy(1440), -aten_select_scatter(1441), -aten_selu(1442), -aten_selu_(1443), -aten_set(1444), -aten_set_(1445), -aten_set_data(1446), -aten_sgn(1447), -aten_sgn_(1448), -aten_sigmoid(1449), -aten_sigmoid_(1450), -aten_sigmoid_backward(1451), -aten_sign(1452), -aten_sign_(1453), -aten_signbit(1454), -aten_silu(1455), -aten_silu_(1456), -aten_silu_backward(1457), -aten_sin(1458), -aten_sin_(1459), -aten_sinc(1460), -aten_sinc_(1461), -aten_sinh(1462), -aten_sinh_(1463), -aten_size(1464), -aten_slice(1465), -aten_slice_backward(1466), -aten_slice_copy(1467), -aten_slice_scatter(1468), -aten_slogdet(1469), -aten_slow_conv3d(1470), 
-aten_slow_conv3d_forward(1471), -aten_slow_conv_dilated2d(1472), -aten_slow_conv_dilated3d(1473), -aten_slow_conv_transpose2d(1474), -aten_slow_conv_transpose3d(1475), -aten_smm(1476), -aten_smooth_l1_loss(1477), -aten_smooth_l1_loss_backward(1478), -aten_soft_margin_loss(1479), -aten_soft_margin_loss_backward(1480), -aten_softmax(1481), -aten_softplus(1482), -aten_softplus_backward(1483), -aten_softshrink(1484), -aten_softshrink_backward(1485), -aten_sort(1486), -aten_sparse_bsc_tensor(1487), -aten_sparse_bsr_tensor(1488), -aten_sparse_compressed_tensor(1489), -aten_sparse_coo_tensor(1490), -aten_sparse_csc_tensor(1491), -aten_sparse_csr_tensor(1492), -aten_sparse_dim(1493), -aten_sparse_mask(1494), -aten_sparse_resize(1495), -aten_sparse_resize_(1496), -aten_sparse_resize_and_clear(1497), -aten_sparse_resize_and_clear_(1498), -aten_sparse_sampled_addmm(1499), -aten_special_airy_ai(1500), -aten_special_bessel_j0(1501), -aten_special_bessel_j1(1502), -aten_special_bessel_y0(1503), -aten_special_bessel_y1(1504), -aten_special_chebyshev_polynomial_t(1505), -aten_special_chebyshev_polynomial_u(1506), -aten_special_chebyshev_polynomial_v(1507), -aten_special_chebyshev_polynomial_w(1508), -aten_special_digamma(1509), -aten_special_entr(1510), -aten_special_erf(1511), -aten_special_erfc(1512), -aten_special_erfcx(1513), -aten_special_erfinv(1514), -aten_special_exp2(1515), -aten_special_expit(1516), -aten_special_expm1(1517), -aten_special_gammainc(1518), -aten_special_gammaincc(1519), -aten_special_gammaln(1520), -aten_special_hermite_polynomial_h(1521), -aten_special_hermite_polynomial_he(1522), -aten_special_i0(1523), -aten_special_i0e(1524), -aten_special_i1(1525), -aten_special_i1e(1526), -aten_special_laguerre_polynomial_l(1527), -aten_special_legendre_polynomial_p(1528), -aten_special_log1p(1529), -aten_special_log_ndtr(1530), -aten_special_log_softmax(1531), -aten_special_logit(1532), -aten_special_logsumexp(1533), -aten_special_modified_bessel_i0(1534), -aten_special_modified_bessel_i1(1535), -aten_special_modified_bessel_k0(1536), -aten_special_modified_bessel_k1(1537), -aten_special_multigammaln(1538), -aten_special_ndtr(1539), -aten_special_ndtri(1540), -aten_special_polygamma(1541), -aten_special_psi(1542), -aten_special_round(1543), -aten_special_scaled_modified_bessel_k0(1544), -aten_special_scaled_modified_bessel_k1(1545), -aten_special_shifted_chebyshev_polynomial_t(1546), -aten_special_shifted_chebyshev_polynomial_u(1547), -aten_special_shifted_chebyshev_polynomial_v(1548), -aten_special_shifted_chebyshev_polynomial_w(1549), -aten_special_sinc(1550), -aten_special_softmax(1551), -aten_special_spherical_bessel_j0(1552), -aten_special_xlog1py(1553), -aten_special_xlogy(1554), -aten_special_zeta(1555), -aten_split(1556), -aten_split_copy(1557), -aten_split_with_sizes(1558), -aten_split_with_sizes_copy(1559), -aten_sqrt(1560), -aten_sqrt_(1561), -aten_square(1562), -aten_square_(1563), -aten_squeeze(1564), -aten_squeeze_(1565), -aten_squeeze_copy(1566), -aten_sspaddmm(1567), -aten_stack(1568), -aten_std(1569), -aten_std_mean(1570), -aten_stft(1571), -aten_stride(1572), -aten_sub(1573), -aten_sub_(1574), -aten_subtract(1575), -aten_subtract_(1576), -aten_sum(1577), -aten_sum_to_size(1578), -aten_svd(1579), -aten_swapaxes(1580), -aten_swapaxes_(1581), -aten_swapdims(1582), -aten_swapdims_(1583), -aten_t(1584), -aten_t_(1585), -aten_t_copy(1586), -aten_take(1587), -aten_take_along_dim(1588), -aten_tan(1589), -aten_tan_(1590), -aten_tanh(1591), -aten_tanh_(1592), 
-aten_tanh_backward(1593), -aten_tensor_split(1594), -aten_tensordot(1595), -aten_thnn_conv2d(1596), -aten_threshold(1597), -aten_threshold_(1598), -aten_threshold_backward(1599), -aten_tile(1600), -aten_to(1601), -aten_to_dense(1602), -aten_to_dense_backward(1603), -aten_to_mkldnn(1604), -aten_to_mkldnn_backward(1605), -aten_to_padded_tensor(1606), -aten_to_sparse(1607), -aten_to_sparse_bsc(1608), -aten_to_sparse_bsr(1609), -aten_to_sparse_csc(1610), -aten_to_sparse_csr(1611), -aten_topk(1612), -aten_trace(1613), -aten_trace_backward(1614), -aten_transpose(1615), -aten_transpose_(1616), -aten_transpose_copy(1617), -aten_trapezoid(1618), -aten_trapz(1619), -aten_triangular_solve(1620), -aten_tril(1621), -aten_tril_(1622), -aten_tril_indices(1623), -aten_triplet_margin_loss(1624), -aten_triu(1625), -aten_triu_(1626), -aten_triu_indices(1627), -aten_true_divide(1628), -aten_true_divide_(1629), -aten_trunc(1630), -aten_trunc_(1631), -aten_type_as(1632), -aten_unbind(1633), -aten_unbind_copy(1634), -aten_unflatten(1635), -aten_unflatten_dense_tensors(1636), -aten_unfold(1637), -aten_unfold_backward(1638), -aten_unfold_copy(1639), -aten_uniform(1640), -aten_uniform_(1641), -aten_unique_consecutive(1642), -aten_unique_dim(1643), -aten_unique_dim_consecutive(1644), -aten_unsafe_chunk(1645), -aten_unsafe_split(1646), -aten_unsafe_split_with_sizes(1647), -aten_unsqueeze(1648), -aten_unsqueeze_(1649), -aten_unsqueeze_copy(1650), -aten_upsample_bicubic2d(1651), -aten_upsample_bicubic2d_backward(1652), -aten_upsample_bilinear2d(1653), -aten_upsample_bilinear2d_backward(1654), -aten_upsample_linear1d(1655), -aten_upsample_linear1d_backward(1656), -aten_upsample_nearest1d(1657), -aten_upsample_nearest1d_backward(1658), -aten_upsample_nearest2d(1659), -aten_upsample_nearest2d_backward(1660), -aten_upsample_nearest3d(1661), -aten_upsample_nearest3d_backward(1662), -aten_upsample_trilinear3d(1663), -aten_upsample_trilinear3d_backward(1664), -aten_value_selecting_reduction_backward(1665), -aten_values(1666), -aten_values_copy(1667), -aten_vander(1668), -aten_var(1669), -aten_var_mean(1670), -aten_vdot(1671), -aten_view(1672), -aten_view_as(1673), -aten_view_as_complex(1674), -aten_view_as_complex_copy(1675), -aten_view_as_real(1676), -aten_view_as_real_copy(1677), -aten_view_copy(1678), -aten_vsplit(1679), -aten_vstack(1680), -aten_where(1681), -aten_xlogy(1682), -aten_xlogy_(1683), -aten_zero(1684), -aten_zero_(1685), -aten_zeros(1686), -aten_zeros_like(1687), - onnx_Add(1688), - onnx_Concat(1689), - onnx_Constant(1690), - onnx_ConstantFill(1691), - onnx_Div(1692), - onnx_GRU(1693), - onnx_Gather(1694), - onnx_Gemm(1695), - onnx_LSTM(1696), - onnx_MatMul(1697), - onnx_Min(1698), - onnx_Max(1699), - onnx_Mul(1700), - onnx_Pow(1701), - onnx_RNN(1702), - onnx_Shape(1703), - onnx_Size(1704), - onnx_Slice(1705), - onnx_Softmax(1706), - onnx_Squeeze(1707), - onnx_Sub(1708), - onnx_Transpose(1709), - onnx_Unsqueeze(1710), - onnx_Loop(1711), - onnx_If(1712), - onnx_Reshape(1713), - onnx_Expand(1714), - onnx_Equal(1715), - onnx_Greater(1716), - onnx_GreaterOrEqual(1717), - onnx_Less(1718), - onnx_LessOrEqual(1719), - onnx_Not(1720), - aten_ATen(1721), - onnx_Split(1722), - onnx_ConstantOfShape(1723), - onnx_Cast(1724), - onnx_Mod(1725), - onnx_Sqrt(1726), - onnx_SplitToSequence(1727), - onnx_SequenceAt(1728), - onnx_SequenceConstruct(1729), - onnx_SequenceEmpty(1730), - onnx_SequenceInsert(1731), - onnx_SequenceErase(1732), - onnx_ConcatFromSequence(1733), - onnx_Identity(1734), - 
onnx_SoftmaxCrossEntropyLoss(1735), - onnx_NegativeLogLikelihoodLoss(1736), - onnx_LogSoftmax(1737), - onnx_ReduceL1(1738), - onnx_ReduceL2(1739), - onnx_Conv(1740), - onnx_BatchNormalization(1741), - onnx_ReduceMean(1742), - onnx_ReduceProd(1743), - onnx_Relu(1744), - onnx_Neg(1745), - onnx_NonZero(1746), - onnx_Range(1747), - onnx_Tile(1748), - onnx_Where(1749), - onnx_Optional(1750), - onnx_OptionalGetElement(1751), - onnx_OptionalHasElement(1752), - attr_A(1753), -attr_B(1754), -attr_C(1755), -attr_H(1756), -attr_HxW(1757), -attr_K(1758), -attr_L(1759), -attr_LD(1760), -attr_LU(1761), -attr_LU_data(1762), -attr_LU_pivots(1763), -attr_M(1764), -attr_N(1765), -attr_P(1766), -attr_Q(1767), -attr_R(1768), -attr_S(1769), -attr_U(1770), -attr_UPLO(1771), -attr_V(1772), -attr_Vh(1773), -attr_W(1774), -attr_X(1775), -attr_a(1776), -attr_abs(1777), -attr_accumulate(1778), -attr_addends(1779), -attr_adjoint(1780), -attr_align_corners(1781), -attr_allow_tf32(1782), -attr_alpha(1783), -attr_amsgrad(1784), -attr_anchor(1785), -attr_angle(1786), -attr_api_name(1787), -attr_append(1788), -attr_approximate(1789), -attr_arg1(1790), -attr_arg2(1791), -attr_arg3(1792), -attr_arg_out(1793), -attr_assume_unique(1794), -attr_atol(1795), -attr_attn_mask(1796), -attr_average_attn_weights(1797), -attr_averaging_const(1798), -attr_aweights(1799), -attr_axis(1800), -attr_axis0(1801), -attr_axis1(1802), -attr_b(1803), -attr_b_hh(1804), -attr_b_ih(1805), -attr_bag_size(1806), -attr_base(1807), -attr_batch1(1808), -attr_batch2(1809), -attr_batch_dim(1810), -attr_batch_first(1811), -attr_batch_size(1812), -attr_batch_sizes(1813), -attr_benchmark(1814), -attr_beta(1815), -attr_beta1(1816), -attr_beta2(1817), -attr_bias(1818), -attr_bias_defined(1819), -attr_bias_g(1820), -attr_bias_sizes(1821), -attr_bidirectional(1822), -attr_bin_edges(1823), -attr_bins(1824), -attr_bit_width(1825), -attr_blank(1826), -attr_blocksize(1827), -attr_boundaries(1828), -attr_buffer(1829), -attr_causal(1830), -attr_ccol_indices(1831), -attr_cdim(1832), -attr_cdist(1833), -attr_ceil_mode(1834), -attr_cell_state_fwd(1835), -attr_center(1836), -attr_ch_axis(1837), -attr_check_errors(1838), -attr_chunk_grad_outputs(1839), -attr_chunks(1840), -attr_coalesced(1841), -attr_coefficients(1842), -attr_col(1843), -attr_col_indices(1844), -attr_col_offsets(1845), -attr_col_offsets_hh(1846), -attr_col_offsets_ih(1847), -attr_compressed_idx(1848), -attr_compressed_indices(1849), -attr_compressed_indices_dtype(1850), -attr_compute_log_sumexp(1851), -attr_compute_mode(1852), -attr_compute_uv(1853), -attr_compute_v(1854), -attr_condition(1855), -attr_copy(1856), -attr_correction(1857), -attr_count(1858), -attr_count_include_pad(1859), -attr_counts(1860), -attr_cpu_dtype(1861), -attr_cpu_enabled(1862), -attr_cpu_nested_shape_example(1863), -attr_create_graph(1864), -attr_crow_indices(1865), -attr_cu_seqlens_k(1866), -attr_cu_seqlens_q(1867), -attr_cuda_dtype(1868), -attr_cuda_enabled(1869), -attr_cudnn_enable(1870), -attr_cudnn_enabled(1871), -attr_cum_seq_k(1872), -attr_cum_seq_q(1873), -attr_cx(1874), -attr_cx_(1875), -attr_cx_tmp(1876), -attr_cy(1877), -attr_cy_(1878), -attr_d(1879), -attr_data(1880), -attr_decimals(1881), -attr_delta(1882), -attr_dense(1883), -attr_dense_dim(1884), -attr_density(1885), -attr_descending(1886), -attr_destination(1887), -attr_deterministic(1888), -attr_device(1889), -attr_device_index(1890), -attr_dgrad_glu(1891), -attr_diagonal(1892), -attr_diagonals(1893), -attr_dilation(1894), -attr_dim(1895), -attr_dim0(1896), 
-attr_dim1(1897), -attr_dim2(1898), -attr_dimension(1899), -attr_dims(1900), -attr_dims_other(1901), -attr_dims_self(1902), -attr_divisor_override(1903), -attr_downscale_factor(1904), -attr_driver(1905), -attr_dropout(1906), -attr_dropout_mask(1907), -attr_dropout_p(1908), -attr_dropout_seed(1909), -attr_dropout_state(1910), -attr_dst(1911), -attr_dtype(1912), -attr_dual(1913), -attr_dummy(1914), -attr_dx(1915), -attr_edge_order(1916), -attr_eigenvalues(1917), -attr_eigenvectors(1918), -attr_eigvals(1919), -attr_eigvecs(1920), -attr_element(1921), -attr_elements(1922), -attr_ellipsis_idx(1923), -attr_embed_dim(1924), -attr_end(1925), -attr_end_dim(1926), -attr_eps(1927), -attr_epsilon(1928), -attr_equal_nan(1929), -attr_equation(1930), -attr_exp_avg_sqs(1931), -attr_exp_avgs(1932), -attr_expand1(1933), -attr_expand2(1934), -attr_expand3(1935), -attr_exponent(1936), -attr_exponential_average_factor(1937), -attr_fake_quant_enabled(1938), -attr_fake_quant_on(1939), -attr_ffn_bias_1(1940), -attr_ffn_bias_2(1941), -attr_ffn_weight_1(1942), -attr_ffn_weight_2(1943), -attr_filename(1944), -attr_fill_value(1945), -attr_flat(1946), -attr_forward(1947), -attr_found_inf(1948), -attr_from(1949), -attr_full(1950), -attr_full_matrices(1951), -attr_fuse_transform_0213(1952), -attr_fweights(1953), -attr_g(1954), -attr_gO(1955), -attr_generator(1956), -attr_ggI(1957), -attr_ggW(1958), -attr_ggb(1959), -attr_glu(1960), -attr_grad(1961), -attr_grad_bias(1962), -attr_grad_cy(1963), -attr_grad_factor(1964), -attr_grad_glu(1965), -attr_grad_hy(1966), -attr_grad_in(1967), -attr_grad_input(1968), -attr_grad_out(1969), -attr_grad_out_(1970), -attr_grad_output(1971), -attr_grad_scale(1972), -attr_grad_w(1973), -attr_grad_weight(1974), -attr_grad_x(1975), -attr_grad_y(1976), -attr_gradient(1977), -attr_grads(1978), -attr_grid(1979), -attr_group(1980), -attr_groups(1981), -attr_growth_interval(1982), -attr_growth_tracker(1983), -attr_half_to_float(1984), -attr_has_bias(1985), -attr_has_biases(1986), -attr_hermitian(1987), -attr_hidden_bias(1988), -attr_hidden_gates(1989), -attr_hidden_size(1990), -attr_high(1991), -attr_hist(1992), -attr_hop_length(1993), -attr_hx(1994), -attr_hx_(1995), -attr_hy_(1996), -attr_i1(1997), -attr_i2(1998), -attr_i3(1999), -attr_ignore_index(2000), -attr_imag(2001), -attr_impl_index(2002), -attr_implicit(2003), -attr_include_last_offset(2004), -attr_include_self(2005), -attr_incr_key(2006), -attr_incr_value(2007), -attr_increasing(2008), -attr_ind(2009), -attr_index(2010), -attr_indexing(2011), -attr_indices(2012), -attr_info(2013), -attr_initial(2014), -attr_input(2015), -attr_input1(2016), -attr_input2(2017), -attr_input3(2018), -attr_input_bias(2019), -attr_input_dtype(2020), -attr_input_g(2021), -attr_input_gates(2022), -attr_input_lengths(2023), -attr_input_scale(2024), -attr_input_size(2025), -attr_input_sizes(2026), -attr_inputs(2027), -attr_interpolation(2028), -attr_interpolation_mode(2029), -attr_inv_scale(2030), -attr_inverse(2031), -attr_invert(2032), -attr_invstd(2033), -attr_is_causal(2034), -attr_is_crow(2035), -attr_is_matrix(2036), -attr_is_result(2037), -attr_is_target(2038), -attr_k(2039), -attr_keepdim(2040), -attr_kernel_size(2041), -attr_key(2042), -attr_label_smoothing(2043), -attr_lambd(2044), -attr_largest(2045), -attr_last_dim_size(2046), -attr_layersOutputs(2047), -attr_layout(2048), -attr_left(2049), -attr_length(2050), -attr_lengths(2051), -attr_level(2052), -attr_like(2053), -attr_list(2054), -attr_log_alpha(2055), -attr_log_input(2056), 
-attr_log_probs(2057), -attr_log_target(2058), -attr_logabsdet(2059), -attr_logsumexp(2060), -attr_low(2061), -attr_lower(2062), -attr_lr(2063), -attr_ltm(2064), -attr_m(2065), -attr_mantissa(2066), -attr_margin(2067), -attr_mask(2068), -attr_mask_check(2069), -attr_mask_type(2070), -attr_mat(2071), -attr_mat1(2072), -attr_mat2(2073), -attr_matrices(2074), -attr_max(2075), -attr_max_exp_avg_sqs(2076), -attr_max_k(2077), -attr_max_norm(2078), -attr_max_q(2079), -attr_max_seqlen_q(2080), -attr_max_size(2081), -attr_max_val(2082), -attr_max_values(2083), -attr_maximize(2084), -attr_maximum_indices(2085), -attr_maxnorm(2086), -attr_mean(2087), -attr_mean_dy(2088), -attr_mean_dy_xmu(2089), -attr_median(2090), -attr_memory_format(2091), -attr_min(2092), -attr_min_indices(2093), -attr_min_val(2094), -attr_minlength(2095), -attr_mode(2096), -attr_momentum(2097), -attr_n(2098), -attr_n_bins(2099), -attr_n_fft(2100), -attr_names(2101), -attr_nan(2102), -attr_need_attn_weights(2103), -attr_need_weights(2104), -attr_neg_log_likelihood(2105), -attr_negative(2106), -attr_negative_slope(2107), -attr_neginf(2108), -attr_nested_size(2109), -attr_nested_strides(2110), -attr_new_data(2111), -attr_nnz(2112), -attr_noise(2113), -attr_non_blocking(2114), -attr_norm(2115), -attr_norm_bias_1(2116), -attr_norm_bias_2(2117), -attr_norm_first(2118), -attr_norm_type(2119), -attr_norm_weight_1(2120), -attr_norm_weight_2(2121), -attr_normalization(2122), -attr_normalized(2123), -attr_normalized_shape(2124), -attr_nt_example(2125), -attr_num_classes(2126), -attr_num_generated(2127), -attr_num_groups(2128), -attr_num_head(2129), -attr_num_heads(2130), -attr_num_layers(2131), -attr_num_samples(2132), -attr_num_weights(2133), -attr_numel(2134), -attr_observer_on(2135), -attr_offset(2136), -attr_offset2bag(2137), -attr_offsets(2138), -attr_onesided(2139), -attr_ord(2140), -attr_order(2141), -attr_other(2142), -attr_out(2143), -attr_out0(2144), -attr_out1(2145), -attr_out2(2146), -attr_out3(2147), -attr_out4(2148), -attr_out5(2149), -attr_out6(2150), -attr_out_dim(2151), -attr_out_int32(2152), -attr_outdim(2153), -attr_output(2154), -attr_output_mask(2155), -attr_output_padding(2156), -attr_output_scale(2157), -attr_output_size(2158), -attr_output_zero_point(2159), -attr_p(2160), -attr_packed(2161), -attr_packed_hh(2162), -attr_packed_ih(2163), -attr_packed_weight(2164), -attr_pad(2165), -attr_pad_mode(2166), -attr_padded(2167), -attr_padding(2168), -attr_padding_idx(2169), -attr_padding_mode(2170), -attr_padding_value(2171), -attr_params(2172), -attr_path(2173), -attr_pdist(2174), -attr_per_row_fake_quant(2175), -attr_per_sample_weights(2176), -attr_periodic(2177), -attr_philox_offset(2178), -attr_philox_seed(2179), -attr_pin_memory(2180), -attr_pivot(2181), -attr_pivots(2182), -attr_plain_idx(2183), -attr_plain_indices(2184), -attr_pos_weight(2185), -attr_posinf(2186), -attr_positive(2187), -attr_pow(2188), -attr_prepend(2189), -attr_primal(2190), -attr_prob(2191), -attr_proj_bias(2192), -attr_proj_size(2193), -attr_proj_weight(2194), -attr_q(2195), -attr_qkv(2196), -attr_qkv_bias(2197), -attr_qkv_weight(2198), -attr_qtensor(2199), -attr_quant_max(2200), -attr_quant_min(2201), -attr_quasi(2202), -attr_query(2203), -attr_r(2204), -attr_random_samples(2205), -attr_range(2206), -attr_rank(2207), -attr_ratio(2208), -attr_rcond(2209), -attr_real(2210), -attr_reduce(2211), -attr_reduce_range(2212), -attr_reduction(2213), -attr_repeats(2214), -attr_replacement(2215), -attr_requires_grad(2216), -attr_reserve(2217), 
-attr_reserveSpace(2218), -attr_reservedSpace(2219), -attr_residuals(2220), -attr_result(2221), -attr_retain_graph(2222), -attr_return_complex(2223), -attr_return_counts(2224), -attr_return_debug_mask(2225), -attr_return_inverse(2226), -attr_reverse(2227), -attr_right(2228), -attr_rounding_mode(2229), -attr_row(2230), -attr_row_indices(2231), -attr_rstd(2232), -attr_rtol(2233), -attr_running_max(2234), -attr_running_mean(2235), -attr_running_min(2236), -attr_running_var(2237), -attr_s(2238), -attr_save_invstd(2239), -attr_save_mean(2240), -attr_save_var(2241), -attr_save_var_transform(2242), -attr_saved_g(2243), -attr_saved_norms(2244), -attr_saved_v(2245), -attr_scalar(2246), -attr_scalar1(2247), -attr_scalar2(2248), -attr_scalars(2249), -attr_scale(2250), -attr_scale_backoff_factor(2251), -attr_scale_factors(2252), -attr_scale_grad_by_freq(2253), -attr_scale_growth_factor(2254), -attr_scale_hh(2255), -attr_scale_ih(2256), -attr_scales(2257), -attr_scales_d(2258), -attr_scales_h(2259), -attr_scales_w(2260), -attr_sections(2261), -attr_self(2262), -attr_self_is_result(2263), -attr_self_num_batch_dims(2264), -attr_self_or_result(2265), -attr_self_sizes(2266), -attr_sequences(2267), -attr_shape(2268), -attr_shared(2269), -attr_shifts(2270), -attr_side(2271), -attr_sigma(2272), -attr_sign(2273), -attr_singular_values(2274), -attr_size(2275), -attr_sizes(2276), -attr_sobolstate(2277), -attr_solution(2278), -attr_some(2279), -attr_sorted(2280), -attr_sorted_sequence(2281), -attr_sorter(2282), -attr_source(2283), -attr_spacing(2284), -attr_sparse(2285), -attr_sparse_dim(2286), -attr_sparse_grad(2287), -attr_split_size(2288), -attr_split_sizes(2289), -attr_src(2290), -attr_stable(2291), -attr_start(2292), -attr_start_dim(2293), -attr_state_steps(2294), -attr_std(2295), -attr_step(2296), -attr_steps(2297), -attr_storage_offset(2298), -attr_stride(2299), -attr_sumdim(2300), -attr_swap(2301), -attr_symmetric_quant(2302), -attr_t(2303), -attr_tangent(2304), -attr_target(2305), -attr_target_lengths(2306), -attr_targets(2307), -attr_tau(2308), -attr_tensor(2309), -attr_tensor1(2310), -attr_tensor2(2311), -attr_tensor_indices_or_sections(2312), -attr_tensors(2313), -attr_tensors1(2314), -attr_test_element(2315), -attr_test_elements(2316), -attr_the_template(2317), -attr_theta(2318), -attr_threshold(2319), -attr_to(2320), -attr_tol(2321), -attr_total(2322), -attr_total_length(2323), -attr_total_weight(2324), -attr_train(2325), -attr_training(2326), -attr_transpose(2327), -attr_transposed(2328), -attr_type1(2329), -attr_type2(2330), -attr_unbiased(2331), -attr_unitriangular(2332), -attr_unpack_data(2333), -attr_unpack_pivots(2334), -attr_unroll_dim(2335), -attr_unsafe(2336), -attr_upper(2337), -attr_upscale_factor(2338), -attr_use_gelu(2339), -attr_use_input_stats(2340), -attr_v(2341), -attr_value(2342), -attr_values(2343), -attr_var(2344), -attr_vec(2345), -attr_vec1(2346), -attr_vec2(2347), -attr_w_hh(2348), -attr_w_ih(2349), -attr_weight(2350), -attr_weight0(2351), -attr_weight1(2352), -attr_weight2(2353), -attr_weight3(2354), -attr_weight4(2355), -attr_weight_arr(2356), -attr_weight_buf(2357), -attr_weight_decay(2358), -attr_weight_g(2359), -attr_weight_scale(2360), -attr_weight_stride0(2361), -attr_weight_zero_point(2362), -attr_weights(2363), -attr_win_length(2364), -attr_window(2365), -attr_window_length(2366), -attr_with_replacement(2367), -attr_workspace(2368), -attr_wrap(2369), -attr_x(2370), -attr_x1(2371), -attr_x2(2372), -attr_y(2373), -attr_z(2374), -attr_z_state(2375), 
-attr_zero_infinity(2376), -attr_zero_point(2377), -attr_zero_point_hh(2378), -attr_zero_point_ih(2379), -attr_zero_points(2380), - attr_Subgraph(2381), - attr_ReverseSubgraph(2382), - attr_f_real_outputs(2383), - attr_df_input_vjps(2384), - attr_df_input_captured_inputs(2385), - attr_df_input_captured_outputs(2386), - attr_df_output_vjps(2387), - attr_axes(2388), - attr_symbolic_shape_inputs(2389), - attr_allow_stack_outputs(2390), - attr_striding_inputs_desc(2391), - attr_striding_outputs_desc(2392), - attr_broadcast(2393), - attr_direction(2394), - attr_ends(2395), - attr_inplace(2396), - attr_input_as_shape(2397), - attr_is_zero(2398), - attr_num_none(2399), - attr_num_present(2400), - attr_perm(2401), - attr_starts(2402), - attr_profiled_type(2403), - attr_transA(2404), - attr_transB(2405), - attr_name(2406), - attr_module(2407), - attr_beg(2408), - attr_idx(2409), - attr_split(2410), - attr_slot(2411), - attr_kinds(2412), - attr_types(2413), - attr_scope(2414), - attr_keepdims(2415), - attr_cache_id(2416), - attr_new_axis(2417), - attr_warn_id(2418), - attr_output_layouts(2419), - attr_allowzero(2420), - attr_seen_none(2421), - attr_overload_name(2422), - num_symbols(2423);
- public final int value;
- private _keys(int v) { this.value = v; }
- private _keys(_keys e) { this.value = e.value; }
- public _keys intern() { for (_keys e : values()) if (e.value == value) return e; return this; }
- @Override public String toString() { return intern().name(); }
-}
+// Note [How UniqueVoidPtr is implemented]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// UniqueVoidPtr solves a common problem for allocators of tensor data, which
+// is that the data pointer (e.g., float*) which you are interested in is not
+// the same as the context pointer (e.g., DLManagedTensor) which you need
+// to actually deallocate the data. Under a conventional deleter design, you
+// have to store extra context in the deleter itself so that you can actually
+// delete the right thing. Implementing this with standard C++ is somewhat
+// error-prone: if you use a std::unique_ptr to manage tensors, the deleter will
+// not be called if the data pointer is nullptr, which can cause a leak if the
+// context pointer is non-null (and the deleter is responsible for freeing both
+// the data pointer and the context pointer).
+//
+// So, in our reimplementation of unique_ptr, we just store the context
+// directly in the unique pointer and attach the deleter to the context
+// pointer itself. In simple cases, the context pointer is just the pointer
+// itself.
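To make the note above concrete, here is a minimal, self-contained C++ sketch of the idiom it describes: ownership (and therefore the deleter) is attached to the context pointer, while the data pointer travels alongside as a non-owning view. The Context struct and make_tensor_data helper are illustrative stand-ins, not the actual c10 types.

    #include <cstddef>
    #include <cstdio>
    #include <memory>

    // Illustrative stand-in for a context such as DLManagedTensor.
    struct Context {
      float* data = nullptr;        // the pointer callers compute with
      ~Context() {
        delete[] data;              // deleting nullptr is a harmless no-op
        std::puts("context freed");
      }
    };

    // The idiom from the note: the unique_ptr owns the *context*, so its
    // deleter runs whenever the context is non-null -- even when the data
    // pointer itself is nullptr (e.g., a zero-element tensor).
    struct TensorData {
      std::unique_ptr<Context> ctx; // owning: deleter is tied to the context
      float* data;                  // non-owning view into *ctx
    };

    TensorData make_tensor_data(std::size_t n) {  // hypothetical helper
      auto ctx = std::make_unique<Context>();
      ctx->data = (n != 0) ? new float[n] : nullptr;
      float* d = ctx->data;         // read before ctx is moved from
      return {std::move(ctx), d};
    }

    int main() {
      TensorData t = make_tensor_data(0);  // data == nullptr, context owned
      // Had the deleter been attached to t.data instead, unique_ptr would
      // skip it here (the data pointer is nullptr) and leak the context.
      (void)t;
      return 0;                     // "context freed" prints on destruction
    }

c10's UniqueVoidPtr follows the same pattern, pairing a raw data pointer with a std::unique_ptr over the context that carries the deleter.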
-// #define DEFINE_SYMBOL(ns, s)
-//   namespace ns { constexpr Symbol s(static_cast<unique_t>(_keys::ns##_##s)); }
-@Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol prim(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol prims(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol nvprims(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol aten(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol cuda(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol onnx(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol attr(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol scope(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol user(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol _caffe2(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol dimname(); - @Namespace("c10::namespaces") @MemberGetter public static native @Const @ByRef Symbol namespaces(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Assign(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol BroadcastingChunk(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol BroadcastSizes(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol ReductionSizes(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Constant(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol ChunkSizes(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol ConstantMKLDNNTensor(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol BroadcastMKLDNNTensors(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol MKLDNNGroup(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol MKLDNNHardSwish(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol MKLDNNHardSigmoid(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol MKLDNNHardTanh(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol MKLDNNClamp(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol StaticRuntimeCopyOuts(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Drop(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Eval(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Expand(); /* onnx */ - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol FusionGroup(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol CudaFusionGroup(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol CudaFusionGuard(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol oneDNNFusionGroup(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol oneDNNFusionGuard(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol
FunctionalGraph(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol add_optional(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol view_copy(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol permute_copy(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol reshape_copy(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol squeeze_copy(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol t_copy(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol transpose_copy(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol unsqueeze_copy(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol flatten_copy(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol expand_copy(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol expand_as_copy(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol DifferentiableGraph(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TensorExprGroup(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TensorExprDynamicGroup(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol StaticSubgraph(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol If(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Jump(); /* debug */ - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol JumpNZ(); /* debug */ - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol JumpZ(); /* debug */ - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Load(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Loop(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Param(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol PackPadded(); /* onnx */ - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol PadPacked(); /* onnx */ - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Placeholder(); /* debug */ - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Print(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol EmptyListLiteral(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol LegacyTypedConstructor(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol PythonOp(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol IgnoredPythonOp(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Reverse(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Return(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol ReturnStmt(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol BreakStmt(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol ContinueStmt(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol 
ComprehensionScope(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Store(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol AutogradZero(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol AutogradAnyNonZero(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol AutogradAllNonZero(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol AutogradAllZero(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Starred(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TupleConstruct(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TupleUnpack(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TupleIndex(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TupleSlice(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol ListConstruct(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol ListUnpack(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol DictConstruct(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol ModuleContainerIndex(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol EnumName(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol EnumValue(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol StringIndex(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol NumToTensor(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Uninitialized(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol VarConcat(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol VarStack(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol With(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Enter(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Exit(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol IfThenElse(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol Bool(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol Int(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol FloatImplicit(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ComplexImplicit(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol IntImplicit(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ScalarImplicit(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol Float(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol Complex(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol str(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol Delete(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol device(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol 
dtype(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol layout(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol id(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol requires_grad(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol MakeTestTensor(); /* test */ - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol AutogradAdd(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol GradOf(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol grad(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol backward(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Guard(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol BailOut(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TypeCheck(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol RequiresGradCheck(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol FallbackGraph(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol FusedConcat(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol ConstantChunk(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol MMTreeReduce(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol MMBatchSide(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol list(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol dict(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol min(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol max(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol abs(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol divmod(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol zip(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol enumerate(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol range(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol rangelist(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol isinstance(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol tolist(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol unchecked_cast(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _grad_sum_to_size(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _size_if_not_equal(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _ncf_unsqueeze(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol warn(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sorted(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol floordiv(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __range_length(); - @Namespace("c10::aten") 
@MemberGetter public static native @Const @ByRef Symbol __derive_index(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __round_to_zero_floordiv(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_scripting(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _unwrap_optional(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol fork(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol awaitable(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol forkClosure(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol awaitableClosure(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol awaitable_nowait(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol awaitable_wait(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol RaiseException(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol Closure(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol CreateObject(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol SetAttr(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol GetAttr(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol HasAttr(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol profile(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol profile_ivalue(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol AddStatValue(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TimePoint(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol CallFunction(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol CallMethod(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol LoopContinuation(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol annotate(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TracedModuleForward(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TracedFork(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol TracedAttr(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol rpc_async(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol rpc_sync(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol rpc_remote(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol is_cuda(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol append(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol as_tensor(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adaptive_avg_pool2d_backward(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol dim(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol format(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol 
percentFormat(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __not__(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __is__(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __isnot__(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _ger(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __getitem__(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _set_item(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol manual_seed(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hash(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol len(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef @Name("wait") Symbol _wait(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol save(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol keys(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ord(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol chr(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hex(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol oct(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol clear(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol setdefault(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bin(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pop(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol insert(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tensor(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol unchecked_unwrap_optional(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __contains__(); - @Namespace("c10::prim") @MemberGetter public static native @Const @ByRef Symbol BailoutTemplate(); - @Namespace("c10::cuda") @MemberGetter public static native @Const @ByRef Symbol _set_device(); - @Namespace("c10::cuda") @MemberGetter public static native @Const @ByRef Symbol set_stream(); - @Namespace("c10::cuda") @MemberGetter public static native @Const @ByRef Symbol _current_device(); - @Namespace("c10::cuda") @MemberGetter public static native @Const @ByRef Symbol synchronize(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol has_torch_function(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_autocast_enabled(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_autocast_cpu_enabled(); - @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __and__(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __iand__(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __ilshift__(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __ior__(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __irshift__(); 
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __ixor__(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __lshift__(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __or__(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __rshift__(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol __xor__(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _adaptive_avg_pool2d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _adaptive_avg_pool2d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _adaptive_avg_pool3d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _adaptive_avg_pool3d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _add_batch_dim(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _add_relu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _add_relu_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _addmm_activation(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _aminmax(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _amp_foreach_non_finite_check_and_unscale(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _amp_foreach_non_finite_check_and_unscale_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _amp_update_scale(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _amp_update_scale_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _assert_async(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _assert_tensor_metadata(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _autocast_to_full_precision(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _autocast_to_reduced_precision(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _batch_norm_impl_index(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _batch_norm_impl_index_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cast_Byte(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cast_Char(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cast_Double(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cast_Float(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cast_Half(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cast_Int(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cast_Long(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cast_Short(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cdist_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol 
_cdist_forward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cholesky_solve_helper();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _choose_qparams_per_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _chunk_grad_outputs_efficient_attention();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _coalesce();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _coalesced();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _coalesced_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _compute_linear_combination();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _conj();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _conj_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _conj_physical();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _conv_depthwise2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _convert_indices_from_coo_to_csr();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _convert_indices_from_csr_to_coo();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _convolution();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _convolution_double_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _convolution_mode();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _copy_from();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _copy_from_and_resize();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _ctc_loss();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _ctc_loss_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cudnn_ctc_loss();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cudnn_init_dropout_state();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cudnn_rnn();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cudnn_rnn_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cudnn_rnn_flatten_weight();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cufft_clear_plan_cache();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cufft_get_plan_cache_max_size();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cufft_get_plan_cache_size();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cufft_set_plan_cache_max_size();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cummax_helper();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _cummin_helper();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _debug_has_internal_overlap();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _dimI();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _dimV();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _dim_arange();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _dirichlet_grad();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _efficient_attention_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _efficient_attention_forward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _efficientzerotensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _embedding_bag();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _embedding_bag_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _embedding_bag_dense_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _embedding_bag_forward_only();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _embedding_bag_per_sample_weights_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _embedding_bag_sparse_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _empty_affine_quantized();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _empty_per_channel_affine_quantized();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _euclidean_dist();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fake_quantize_learnable_per_channel_affine();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fake_quantize_learnable_per_channel_affine_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fake_quantize_learnable_per_tensor_affine();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fake_quantize_learnable_per_tensor_affine_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fake_quantize_per_tensor_affine_cachemask_tensor_qparams();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fft_c2c();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fft_c2r();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fft_r2c();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _flash_attention_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _flash_attention_forward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foobar();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_abs();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_abs_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_acos();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_acos_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_add();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_add_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_addcdiv();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_addcdiv_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_addcmul();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_addcmul_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_asin();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_asin_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_atan();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_atan_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_ceil();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_ceil_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_clamp_max();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_clamp_max_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_clamp_min();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_clamp_min_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_cos();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_cos_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_cosh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_cosh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_div();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_div_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_erf();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_erf_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_erfc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_erfc_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_exp();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_exp_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_expm1();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_expm1_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_floor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_floor_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_frac();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_frac_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_lerp();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_lerp_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_lgamma();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_lgamma_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_log();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_log10();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_log10_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_log1p();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_log1p_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_log2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_log2_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_log_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_maximum();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_maximum_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_minimum();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_minimum_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_mul();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_mul_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_neg();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_neg_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_norm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_reciprocal();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_reciprocal_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_round();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_round_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_sigmoid();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_sigmoid_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_sin();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_sin_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_sinh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_sinh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_sqrt();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_sqrt_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_sub();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_sub_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_tan();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_tan_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_tanh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_tanh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_trunc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_trunc_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_zero();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _foreach_zero_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fused_adam();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fused_adam_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fused_adamw();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fused_adamw_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fused_dropout();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fused_moving_avg_obs_fq_helper();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fused_moving_avg_obs_fq_helper_functional();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fused_sdp_choice();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fw_primal();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _fw_primal_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _gather_sparse_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _grid_sampler_2d_cpu_fallback();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _grid_sampler_2d_cpu_fallback_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _has_compatible_shallow_copy_type();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _has_same_storage_numel();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _histogramdd_bin_edges();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _histogramdd_from_bin_cts();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _histogramdd_from_bin_tensors();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _index_put_impl();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _index_put_impl_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _indices();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _indices_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _is_all_true();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _is_any_true();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _is_zerotensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _linalg_check_errors();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _linalg_det();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _linalg_eigh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _linalg_slogdet();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _linalg_solve_ex();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _linalg_svd();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _local_scalar_dense();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _log_softmax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _log_softmax_backward_data();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _logcumsumexp();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _lstm_mps();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _lu_with_info();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _make_dual();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _make_dual_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _make_per_channel_quantized_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _make_per_tensor_quantized_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _masked_scale();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _masked_softmax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _masked_softmax_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _mkldnn_reshape();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _mkldnn_transpose();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _mkldnn_transpose_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _mps_convolution();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _mps_convolution_transpose();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _native_batch_norm_legit();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _native_batch_norm_legit_functional();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _native_decoder_only_multi_head_attention();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _native_multi_head_attention();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _neg_view();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _neg_view_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_from_padded();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_from_padded_and_nested_example();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_select_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_sum_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_tensor_from_mask();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_tensor_from_mask_left_aligned();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_tensor_from_tensor_list();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_tensor_offsets();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_tensor_size();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_tensor_softmax_with_shape();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_tensor_strides();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_view_from_buffer();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nested_view_from_buffer_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _new_zeros_with_same_feature_meta();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nnpack_available();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nnpack_spatial_convolution();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _nnz();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _pack_padded_sequence();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _pack_padded_sequence_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _pad_circular();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _pad_enum();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _pad_packed_sequence();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _pdist_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _pdist_forward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _pin_memory();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _prelu_kernel();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _prelu_kernel_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _remove_batch_dim();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _reshape_alias();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _reshape_alias_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _reshape_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _reshape_from_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _resize_output();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _resize_output_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _rowwise_prune();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sample_dirichlet();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _saturate_weight_to_fp16();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _scaled_dot_product_attention();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _scaled_dot_product_attention_math();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _scaled_dot_product_efficient_attention();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _scaled_dot_product_efficient_attention_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _scaled_dot_product_flash_attention();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _scaled_dot_product_flash_attention_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _segment_reduce_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _shape_as_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _slow_conv2d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _slow_conv2d_forward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sobol_engine_draw();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sobol_engine_ff();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sobol_engine_ff_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sobol_engine_initialize_state();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sobol_engine_initialize_state_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sobol_engine_scramble();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sobol_engine_scramble_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _softmax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _softmax_backward_data();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_addmm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_broadcast_to();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_broadcast_to_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_bsc_tensor_unsafe();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_bsr_tensor_unsafe();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_compressed_tensor_unsafe();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_coo_tensor_unsafe();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_coo_tensor_with_dims();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_coo_tensor_with_dims_and_tensors();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_csc_tensor_unsafe();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_csr_prod();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_csr_sum();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_csr_tensor_unsafe();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_log_softmax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_log_softmax_backward_data();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_mm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_mm_reduce_impl();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_mm_reduce_impl_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_softmax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_softmax_backward_data();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_sparse_matmul();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_sum();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _sparse_sum_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _spdiags();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _stack();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _standard_gamma();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _standard_gamma_grad();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_ambiguous_defaults();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_autograd_multiple_dispatch();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_autograd_multiple_dispatch_view();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_autograd_multiple_dispatch_view_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_check_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_optional_filled_intlist();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_optional_floatlist();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_optional_intlist();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_serialization_subcmul();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_string_default();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _test_warn_in_autograd();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _thnn_differentiable_gru_cell_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _thnn_differentiable_lstm_cell_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _thnn_fused_gru_cell();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _thnn_fused_gru_cell_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _thnn_fused_lstm_cell();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _thnn_fused_lstm_cell_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _thnn_fused_lstm_cell_backward_impl();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _to_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _to_cpu();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _to_dense();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _transform_bias_rescale_qkv();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _transformer_decoder_only_layer_fwd();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _transformer_encoder_layer_fwd();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _trilinear();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _triton_multi_head_attention();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _triton_scaled_dot_attention();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _unique();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _unique2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _unpack_dual();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _unsafe_view();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _upsample_bicubic2d_aa();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _upsample_bicubic2d_aa_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _upsample_bilinear2d_aa();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _upsample_bilinear2d_aa_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _upsample_nearest_exact1d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _upsample_nearest_exact1d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _upsample_nearest_exact2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _upsample_nearest_exact2d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _upsample_nearest_exact3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _upsample_nearest_exact3d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _use_cudnn_ctc_loss();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _use_cudnn_rnn_flatten_weight();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _validate_compressed_sparse_indices();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _validate_sparse_bsc_tensor_args();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _validate_sparse_bsr_tensor_args();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _validate_sparse_compressed_tensor_args();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _validate_sparse_coo_tensor_args();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _validate_sparse_csc_tensor_args();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _validate_sparse_csr_tensor_args();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _values();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _values_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _version();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _weight_norm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _weight_norm_differentiable_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _weight_norm_interface();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol _weight_norm_interface_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol abs_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol absolute();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol absolute_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol acos();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol acos_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol acosh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol acosh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adaptive_avg_pool1d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adaptive_avg_pool2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adaptive_avg_pool3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adaptive_avg_pool3d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adaptive_max_pool1d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adaptive_max_pool2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adaptive_max_pool2d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adaptive_max_pool3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adaptive_max_pool3d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol add();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol add_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addbmm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addbmm_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addcdiv();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addcdiv_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addcmul();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addcmul_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addmm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addmm_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addmv();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addmv_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addr();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol addr_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol adjoint();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol affine_grid_generator();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol affine_grid_generator_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol alias();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol alias_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol align_as();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol align_tensors();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol align_to();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol all();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol allclose();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol alpha_dropout();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol alpha_dropout_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol amax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol amin();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol aminmax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol angle();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol any();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arange();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arccos();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arccos_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arccosh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arccosh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arcsin();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arcsin_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arcsinh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arcsinh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arctan();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arctan2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arctan2_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arctan_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arctanh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol arctanh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol argmax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol argmin();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol argsort();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol argwhere();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol as_strided();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol as_strided_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol as_strided_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol as_strided_scatter();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol asin();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol asin_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol asinh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol asinh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol atan();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol atan2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol atan2_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol atan_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol atanh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol atanh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol atleast_1d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol atleast_2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol atleast_3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol avg_pool1d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol avg_pool2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol avg_pool2d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol avg_pool3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol avg_pool3d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol baddbmm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol baddbmm_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bartlett_window();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol batch_norm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol batch_norm_backward_elemt();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol batch_norm_backward_reduce();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol batch_norm_elemt();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol batch_norm_gather_stats();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol batch_norm_gather_stats_with_counts();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol batch_norm_stats();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol batch_norm_update_stats();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bernoulli();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bernoulli_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bilinear();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol binary_cross_entropy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol binary_cross_entropy_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol binary_cross_entropy_with_logits();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bincount();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol binomial();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_and();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_and_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_left_shift();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_left_shift_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_not();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_not_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_or();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_or_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_right_shift();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_right_shift_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_xor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bitwise_xor_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol blackman_window();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol block_diag();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bmm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol broadcast_tensors();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol broadcast_to();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol bucketize();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol can_cast();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cartesian_prod();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cat();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cauchy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cauchy_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ccol_indices();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ccol_indices_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cdist();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ceil();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ceil_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol celu();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol celu_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol chain_matmul();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol chalf();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol channel_shuffle();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cholesky();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cholesky_inverse();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cholesky_solve();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol choose_qparams_optimized();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol chunk();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol clamp();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol clamp_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol clamp_max();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol clamp_max_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol clamp_min();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol clamp_min_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol clip();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol clip_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef @Name("clone") Symbol _clone();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol coalesce();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol col2im();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol col_indices();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol col_indices_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol column_stack();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol combinations();
-
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol concat();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol concatenate();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conj();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conj_physical();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conj_physical_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol constant_pad_nd();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol contiguous();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conv1d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conv2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conv3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conv_depthwise3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conv_tbc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conv_tbc_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conv_transpose1d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conv_transpose2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol conv_transpose3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol convolution();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol convolution_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol convolution_backward_overrideable();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol convolution_overrideable();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol copy_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol copy_sparse_to_sparse();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol copy_sparse_to_sparse_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol copysign();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol copysign_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol corrcoef();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cos();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cos_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cosh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cosh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cosine_embedding_loss();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cosine_similarity();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol count_nonzero();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cov();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cross();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cross_entropy_loss();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol crow_indices();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol crow_indices_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ctc_loss();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_affine_grid_generator();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_affine_grid_generator_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_batch_norm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_batch_norm_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_convolution();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_convolution_add_relu();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_convolution_relu();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_convolution_transpose();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_grid_sampler();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_grid_sampler_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cudnn_is_acceptable();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cummax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cummaxmin_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cummin();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cumprod();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cumprod_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cumprod_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cumsum();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cumsum_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol cumulative_trapezoid();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol data();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol deg2rad();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol deg2rad_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol dense_dim();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol dequantize();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol det();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol detach();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol detach_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol detach_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol diag();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol diag_embed();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol diagflat();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol diagonal();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol diagonal_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol diagonal_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol diagonal_scatter();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol diff();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol digamma();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol digamma_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol dist();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol div();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol div_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol divide();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol divide_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol dot();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol dropout();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol dropout_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol dsplit();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol dstack();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol einsum();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol elu();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol elu_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol elu_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol embedding();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol embedding_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol embedding_bag();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol embedding_dense_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol embedding_renorm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol embedding_renorm_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol embedding_sparse_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol empty();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol empty_like();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol empty_quantized();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol empty_strided();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol eq();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol eq_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol equal();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol erf();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol erf_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol erfc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol erfc_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol erfinv();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol erfinv_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol exp();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol exp2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol exp2_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol exp_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol expand();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol expand_as();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol expm1();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol expm1_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol exponential();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol exponential_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol eye();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fake_quantize_per_channel_affine();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fake_quantize_per_channel_affine_cachemask();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fake_quantize_per_channel_affine_cachemask_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fake_quantize_per_tensor_affine();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fake_quantize_per_tensor_affine_cachemask();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fake_quantize_per_tensor_affine_cachemask_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fbgemm_linear_fp16_weight();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fbgemm_linear_fp16_weight_fp32_activation();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fbgemm_linear_int8_weight();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fbgemm_linear_int8_weight_fp32_activation();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fbgemm_linear_quantize_weight();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fbgemm_pack_gemm_matrix_fp16();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fbgemm_pack_quantized_matrix();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol feature_alpha_dropout();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol feature_alpha_dropout_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol feature_dropout();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol feature_dropout_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_fft();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_fft2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_fftfreq();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_fftn();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_fftshift();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_hfft();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_hfft2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_hfftn();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_ifft();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_ifft2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_ifftn();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_ifftshift();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_ihfft();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_ihfft2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_ihfftn();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_irfft();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_irfft2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_irfftn();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_rfft();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_rfft2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_rfftfreq();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fft_rfftn();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef @Name("fill") Symbol _fill();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fill_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fill_diagonal();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fill_diagonal_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fix();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fix_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol flatten();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol flatten_dense_tensors();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol flip();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fliplr();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol flipud();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol float_power();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol float_power_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol floor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol floor_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol floor_divide();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol floor_divide_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fmax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fmin();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fmod();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fmod_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol frac();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol frac_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fractional_max_pool2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fractional_max_pool2d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fractional_max_pool3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fractional_max_pool3d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol frexp();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol frobenius_norm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol from_file();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol full();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol full_like();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol fused_moving_avg_obs_fake_quant();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gather();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gather_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gcd();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gcd_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ge();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ge_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gelu();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gelu_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gelu_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol geometric();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol geometric_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol geqrf();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ger();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol glu();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol glu_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol glu_backward_jvp();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol glu_jvp();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gradient();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol greater();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol greater_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol greater_equal();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol greater_equal_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol grid_sampler();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol grid_sampler_2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol grid_sampler_2d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol grid_sampler_3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol grid_sampler_3d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol group_norm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gru();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gru_cell();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gt();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol gt_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hamming_window();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hann_window();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardshrink();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardshrink_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardsigmoid();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardsigmoid_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardsigmoid_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardswish();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardswish_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardswish_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardtanh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardtanh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hardtanh_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol heaviside();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol heaviside_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hinge_embedding_loss();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol histc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol histogram();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol histogramdd();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hsplit(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hspmm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hstack(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol huber_loss(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol huber_loss_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hypot(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol hypot_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol i0(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol i0_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol igamma(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol igamma_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol igammac(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol igammac_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol im2col(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol imag(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_add(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_add_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_copy(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_copy_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_fill(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_fill_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_put(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_put_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_reduce(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_reduce_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_select(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol index_select_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol indices(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol indices_copy(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol infinitely_differentiable_gelu_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol inner(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol instance_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol int_repr(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol inverse(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_coalesced(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_complex(); -@Namespace("c10::aten") @MemberGetter public static 
native @Const @ByRef Symbol is_conj(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_distributed(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_floating_point(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_inference(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_leaf(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_neg(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_nonzero(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_pinned(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_same_size(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_set_to(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_signed(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol is_vulkan_available(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol isclose(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol isfinite(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol isin(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol isinf(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol isnan(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol isneginf(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol isposinf(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol isreal(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol istft(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol item(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol kaiser_window(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol kl_div(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol kron(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol kthvalue(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol l1_loss(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol layer_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lcm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lcm_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ldexp(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ldexp_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol le(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol le_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol leaky_relu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol leaky_relu_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol leaky_relu_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lerp(); -@Namespace("c10::aten") @MemberGetter public static native @Const 
@ByRef Symbol lerp_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol less(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol less_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol less_equal(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol less_equal_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lgamma(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lgamma_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lift(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lift_fresh(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lift_fresh_copy(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_cholesky(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_cholesky_ex(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_cond(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_cross(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_det(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_diagonal(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_eig(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_eigh(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_eigvals(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_eigvalsh(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_householder_product(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_inv(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_inv_ex(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_ldl_factor(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_ldl_factor_ex(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_ldl_solve(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_lstsq(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_lu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_lu_factor(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_lu_factor_ex(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_lu_solve(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_matmul(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_matrix_exp(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_matrix_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_matrix_power(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_matrix_rank(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol 
linalg_multi_dot(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_pinv(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_qr(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_slogdet(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_solve(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_solve_ex(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_solve_triangular(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_svd(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_svdvals(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_tensorinv(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_tensorsolve(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_vander(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_vecdot(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linalg_vector_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linear(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linear_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol linspace(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log10(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log10_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log1p(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log1p_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log2(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log2_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log_normal(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log_normal_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log_sigmoid(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log_sigmoid_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log_sigmoid_forward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol log_softmax(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logaddexp(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logaddexp2(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logcumsumexp(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logdet(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logical_and(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol 
logical_and_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logical_not(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logical_not_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logical_or(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logical_or_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logical_xor(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logical_xor_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logit(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logit_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logit_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logspace(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol logsumexp(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lshift(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lstm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lstm_cell(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lstm_mps_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lt(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lt_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lu_solve(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol lu_unpack(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mH(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mT(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol margin_ranking_loss(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol masked_fill(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol masked_fill_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol masked_scatter(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol masked_scatter_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol masked_select(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol masked_select_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol matmul(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol matmul_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol matrix_H(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol matrix_exp(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol matrix_exp_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol matrix_power(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_pool1d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_pool1d_with_indices(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_pool2d(); 
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_pool2d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_pool2d_with_indices(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_pool2d_with_indices_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_pool3d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_pool3d_with_indices(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_pool3d_with_indices_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_unpool2d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol max_unpool3d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol maximum(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mean(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol median(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol meshgrid(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol minimum(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol miopen_batch_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol miopen_batch_norm_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol miopen_convolution(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol miopen_convolution_add_relu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol miopen_convolution_relu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol miopen_convolution_transpose(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol miopen_depthwise_convolution(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol miopen_rnn(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol miopen_rnn_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mish(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mish_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mish_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_adaptive_avg_pool2d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_adaptive_avg_pool2d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_convolution(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_linear(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_linear_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_linear_backward_input(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_linear_backward_weights(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_max_pool2d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_max_pool2d_backward(); -@Namespace("c10::aten") 
@MemberGetter public static native @Const @ByRef Symbol mkldnn_max_pool3d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_max_pool3d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_reorder_conv2d_weight(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_reorder_conv3d_weight(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_rnn_layer(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mkldnn_rnn_layer_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mode(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol moveaxis(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol movedim(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mps_convolution_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mps_convolution_transpose_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mse_loss(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mse_loss_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol msort(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mul(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mul_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol multi_margin_loss(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol multi_margin_loss_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol multilabel_margin_loss(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol multilabel_margin_loss_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol multilabel_margin_loss_forward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol multinomial(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol multiply(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol multiply_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mv(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mvlgamma(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol mvlgamma_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nan_to_num(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nan_to_num_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nanmean(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nanmedian(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nanquantile(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nansum(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol narrow(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol narrow_copy(); 
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol native_batch_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol native_batch_norm_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol native_channel_shuffle(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol native_dropout(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol native_dropout_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol native_group_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol native_group_norm_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol native_layer_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol native_layer_norm_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol native_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ne(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ne_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol neg(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol neg_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol negative(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol negative_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nested_to_padded_tensor(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol new_empty(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol new_empty_strided(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol new_full(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol new_ones(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol new_zeros(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nextafter(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nextafter_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nll_loss(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nll_loss2d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nll_loss2d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nll_loss2d_forward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nll_loss_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nll_loss_forward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nll_loss_nd(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nonzero(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nonzero_numpy(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol norm_except_dim(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol normal(); -@Namespace("c10::aten") 
@MemberGetter public static native @Const @ByRef Symbol normal_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol normal_functional(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol not_equal(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol not_equal_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol nuclear_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol numpy_T(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol one_hot(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ones(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ones_like(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol orgqr(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ormqr(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol outer(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol output_nr(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pad(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pad_sequence(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pairwise_distance(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pdist(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol permute(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pin_memory(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pinverse(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pixel_shuffle(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pixel_unshuffle(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol poisson(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol poisson_nll_loss(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol polar(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol polygamma(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol polygamma_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol positive(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pow(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol pow_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol prelu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol prod(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol promote_types(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol put(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol put_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol q_per_channel_axis(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol q_per_channel_scales(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol q_per_channel_zero_points(); 
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol q_scale(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol q_zero_point(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol qr(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol qscheme(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantile(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantize_per_channel(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantize_per_tensor(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantize_per_tensor_dynamic(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantized_batch_norm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantized_gru_cell(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantized_lstm_cell(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantized_max_pool1d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantized_max_pool2d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantized_rnn_relu_cell(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol quantized_rnn_tanh_cell(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rad2deg(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rad2deg_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rand(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rand_like(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol randint(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol randint_like(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol randn(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol randn_like(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol random(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol random_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol randperm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ravel(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol real(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol reciprocal(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol reciprocal_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol record_stream(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol refine_names(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol reflection_pad1d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol reflection_pad1d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol reflection_pad2d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol reflection_pad2d_backward(); -@Namespace("c10::aten") 
@MemberGetter public static native @Const @ByRef Symbol reflection_pad3d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol reflection_pad3d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol relu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol relu6(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol relu6_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol relu_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol remainder(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol remainder_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rename(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rename_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol renorm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol renorm_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol repeat(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol repeat_interleave(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol replication_pad1d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol replication_pad1d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol replication_pad2d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol replication_pad2d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol replication_pad3d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol replication_pad3d_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol requires_grad_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol reshape(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol reshape_as(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol resize(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol resize_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol resize_as(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol resize_as_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol resize_as_sparse(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol resize_as_sparse_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol resolve_conj(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol resolve_neg(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol result_type(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol retain_grad(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol retains_grad(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rnn_relu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rnn_relu_cell(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol 
rnn_tanh(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rnn_tanh_cell(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol roll(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rot90(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol round(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol round_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol row_indices(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol row_indices_copy(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol row_stack(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rrelu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rrelu_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rrelu_with_noise(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rrelu_with_noise_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rrelu_with_noise_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rshift(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rsqrt(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rsqrt_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol rsub(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol scalar_tensor(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol scaled_dot_product_attention(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol scatter(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol scatter_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol scatter_add(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol scatter_add_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol scatter_reduce(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol scatter_reduce_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol searchsorted(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol segment_reduce(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol select(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol select_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol select_copy(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol select_scatter(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol selu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol selu_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol set(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol set_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol set_data(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sgn(); -@Namespace("c10::aten") 
@MemberGetter public static native @Const @ByRef Symbol sgn_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sigmoid(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sigmoid_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sigmoid_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sign(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sign_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol signbit(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol silu(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol silu_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol silu_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sin(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sin_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sinc(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sinc_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sinh(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sinh_(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol size(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slice(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slice_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slice_copy(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slice_scatter(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slogdet(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slow_conv3d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slow_conv3d_forward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slow_conv_dilated2d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slow_conv_dilated3d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slow_conv_transpose2d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol slow_conv_transpose3d(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol smm(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol smooth_l1_loss(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol smooth_l1_loss_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol soft_margin_loss(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol soft_margin_loss_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol softmax(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol softplus(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol softplus_backward(); -@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol softshrink(); -@Namespace("c10::aten") @MemberGetter public 
static native @Const @ByRef Symbol softshrink_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sort();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_bsc_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_bsr_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_compressed_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_coo_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_csc_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_csr_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_dim();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_mask();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_resize();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_resize_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_resize_and_clear();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_resize_and_clear_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sparse_sampled_addmm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_airy_ai();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_bessel_j0();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_bessel_j1();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_bessel_y0();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_bessel_y1();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_chebyshev_polynomial_t();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_chebyshev_polynomial_u();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_chebyshev_polynomial_v();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_chebyshev_polynomial_w();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_digamma();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_entr();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_erf();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_erfc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_erfcx();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_erfinv();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_exp2();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_expit();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_expm1();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_gammainc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_gammaincc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_gammaln();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_hermite_polynomial_h();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_hermite_polynomial_he();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_i0();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_i0e();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_i1();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_i1e();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_laguerre_polynomial_l();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_legendre_polynomial_p();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_log1p();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_log_ndtr();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_log_softmax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_logit();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_logsumexp();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_modified_bessel_i0();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_modified_bessel_i1();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_modified_bessel_k0();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_modified_bessel_k1();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_multigammaln();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_ndtr();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_ndtri();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_polygamma();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_psi();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_round();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_scaled_modified_bessel_k0();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_scaled_modified_bessel_k1();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_shifted_chebyshev_polynomial_t();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_shifted_chebyshev_polynomial_u();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_shifted_chebyshev_polynomial_v();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_shifted_chebyshev_polynomial_w();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_sinc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_softmax();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_spherical_bessel_j0();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_xlog1py();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_xlogy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol special_zeta();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol split();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol split_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol split_with_sizes();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol split_with_sizes_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sqrt();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sqrt_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol square();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol square_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol squeeze();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol squeeze_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sspaddmm();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol stack();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol std();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol std_mean();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol stft();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol stride();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sub();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sub_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol subtract();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol subtract_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sum();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol sum_to_size();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol svd();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol swapaxes();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol swapaxes_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol swapdims();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol swapdims_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol t();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol t_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol take();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol take_along_dim();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tan();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tan_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tanh();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tanh_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tanh_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tensor_split();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tensordot();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol thnn_conv2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol threshold();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol threshold_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol threshold_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tile();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to_dense();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to_dense_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to_mkldnn();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to_mkldnn_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to_padded_tensor();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to_sparse();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to_sparse_bsc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to_sparse_bsr();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to_sparse_csc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol to_sparse_csr();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol topk();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol trace();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol trace_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol transpose();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol transpose_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol trapezoid();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol trapz();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol triangular_solve();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tril();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tril_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol tril_indices();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol triplet_margin_loss();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol triu();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol triu_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol triu_indices();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol true_divide();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol true_divide_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol trunc();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol trunc_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol type_as();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unbind();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unbind_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unflatten();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unflatten_dense_tensors();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unfold();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unfold_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unfold_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol uniform();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol uniform_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unique_consecutive();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unique_dim();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unique_dim_consecutive();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unsafe_chunk();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unsafe_split();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unsafe_split_with_sizes();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unsqueeze();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol unsqueeze_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_bicubic2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_bicubic2d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_bilinear2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_bilinear2d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_linear1d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_linear1d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_nearest1d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_nearest1d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_nearest2d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_nearest2d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_nearest3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_nearest3d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_trilinear3d();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol upsample_trilinear3d_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol value_selecting_reduction_backward();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol values();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol values_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol vander();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol var();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol var_mean();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol vdot();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol view();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol view_as();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol view_as_complex();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol view_as_complex_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol view_as_real();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol view_as_real_copy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol vsplit();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol vstack();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol where();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol xlogy();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol xlogy_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef @Name("zero") Symbol _zero();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol zero_();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol zeros();
-@Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol zeros_like();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Add();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Concat();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol ConstantFill();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Div();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol GRU();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Gather();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Gemm();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol LSTM();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol MatMul();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Min();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Max();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Mul();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Pow();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol RNN();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Shape();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Size();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Slice();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Softmax();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Squeeze();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Sub();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Transpose();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Unsqueeze();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Reshape();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Equal();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Greater();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol GreaterOrEqual();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Less();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol LessOrEqual();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Not();
- @Namespace("c10::aten") @MemberGetter public static native @Const @ByRef Symbol ATen();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Split();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol ConstantOfShape();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Cast();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Mod();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Sqrt();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol SplitToSequence();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol SequenceAt();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol SequenceConstruct();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol SequenceEmpty();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol SequenceInsert();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol SequenceErase();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol ConcatFromSequence();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Identity();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol SoftmaxCrossEntropyLoss();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol NegativeLogLikelihoodLoss();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol LogSoftmax();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol ReduceL1();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol ReduceL2();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Conv();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol BatchNormalization();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol ReduceMean();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol ReduceProd();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Relu();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Neg();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol NonZero();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Range();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Tile();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Where();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol Optional();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol OptionalGetElement();
- @Namespace("c10::onnx") @MemberGetter public static native @Const @ByRef Symbol OptionalHasElement();
- @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol A();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol B();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol C();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol H();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol HxW();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol K();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol L();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol LD();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol LU();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol LU_data();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol LU_pivots();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol M();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol N();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol P();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol Q();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol R();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol S();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol U();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol UPLO();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol V();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol Vh();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol W();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol X();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol a();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol accumulate();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol addends();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol align_corners();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol allow_tf32();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol alpha();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol amsgrad();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol anchor();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol api_name();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol approximate();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol arg1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol arg2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol arg3();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol arg_out();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol assume_unique();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol atol();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol attn_mask();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol average_attn_weights();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol averaging_const();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol aweights();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol axis();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol axis0();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol axis1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol b();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol b_hh();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol b_ih();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol bag_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol base();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol batch1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol batch2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol batch_dim();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol batch_first();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol batch_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol batch_sizes();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol benchmark();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol beta();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol beta1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol beta2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol bias();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol bias_defined();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol bias_g();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol bias_sizes();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol bidirectional();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol bin_edges();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol bins();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol bit_width();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol blank();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol blocksize();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol boundaries();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol buffer();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol causal();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cdim();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ceil_mode();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cell_state_fwd();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol center();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ch_axis();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol check_errors();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol chunk_grad_outputs();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol chunks();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol coalesced();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol coefficients();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol col();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol col_offsets();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol col_offsets_hh();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol col_offsets_ih();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol compressed_idx();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol compressed_indices();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol compressed_indices_dtype();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol compute_log_sumexp();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol compute_mode();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol compute_uv();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol compute_v();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol condition();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol correction();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol count();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol count_include_pad();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol counts();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cpu_dtype();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cpu_enabled();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cpu_nested_shape_example();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol create_graph();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cu_seqlens_k();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cu_seqlens_q();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cuda_dtype();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cuda_enabled();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cudnn_enable();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cudnn_enabled();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cum_seq_k();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cum_seq_q();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cx();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cx_();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cx_tmp();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cy();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cy_();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol d();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol decimals();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol delta();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dense();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol density();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol descending();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol destination();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol deterministic();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol device_index();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dgrad_glu();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol diagonals();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dilation();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dim0();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dim1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dim2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dimension();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dims();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dims_other();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dims_self();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol divisor_override();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol downscale_factor();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol driver();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dropout_mask();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dropout_p();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dropout_seed();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dropout_state();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dst();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dual();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dummy();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol dx();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol edge_order();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol eigenvalues();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol eigenvectors();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol eigvals();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol eigvecs();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol element();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol elements();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ellipsis_idx();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol embed_dim();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol end();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol end_dim();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol eps();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol epsilon();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol equal_nan();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol equation();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol exp_avg_sqs();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol exp_avgs();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol expand1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol expand2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol expand3();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol exponent();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol exponential_average_factor();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol fake_quant_enabled();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol fake_quant_on();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ffn_bias_1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ffn_bias_2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ffn_weight_1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ffn_weight_2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol filename();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol fill_value();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol flat();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol forward();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol found_inf();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol from();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol full_matrices();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol fuse_transform_0213();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol fweights();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol g();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol gO();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol generator();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ggI();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ggW();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ggb();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_bias();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_cy();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_factor();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_glu();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_hy();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_in();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_input();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_out();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_out_();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_output();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_scale();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_w();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_weight();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_x();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grad_y();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grads();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol grid();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol group();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol groups();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol growth_interval();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol growth_tracker();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol half_to_float();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol has_bias();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol has_biases();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol hermitian();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol hidden_bias();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol hidden_gates();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol hidden_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol high();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol hist();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol hop_length();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol hx();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol hx_();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol hy_();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol i1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol i2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol i3();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ignore_index();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol impl_index();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol implicit();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol include_last_offset();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol include_self();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol incr_key();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol incr_value();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol increasing();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ind();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol indexing();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol info();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol initial();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input3();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input_bias();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input_dtype();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input_g();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input_gates();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input_lengths();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input_scale();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input_sizes();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol inputs();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol interpolation();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol interpolation_mode();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol inv_scale();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol invert();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol invstd();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol is_causal();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol is_crow();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol is_matrix();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol is_result();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol is_target();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol k();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol keepdim();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol kernel_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol key();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol label_smoothing();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol lambd();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol largest();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol last_dim_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol layersOutputs();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol left();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol length();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol lengths();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol level();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol like();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol log_alpha();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol log_input();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol log_probs();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol log_target();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol logabsdet();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol low();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol lower();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol lr();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ltm();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol m();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol mantissa();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol margin();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol mask();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol mask_check();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol mask_type();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol mat();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol mat1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol mat2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol matrices();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol max_exp_avg_sqs();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol max_k();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol max_norm();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol max_q();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol max_seqlen_q();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol max_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol max_val();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol max_values();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol maximize();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol maximum_indices();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol maxnorm();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol mean_dy();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol mean_dy_xmu();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol memory_format();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol min_indices();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol min_val();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol minlength();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol momentum();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol n();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol n_bins();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol n_fft();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol names();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol nan();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol need_attn_weights();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol need_weights();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol neg_log_likelihood();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol negative_slope();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol neginf();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol nested_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol nested_strides();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol new_data();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol nnz();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol noise();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol non_blocking();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol norm_bias_1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol norm_bias_2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol norm_first();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol norm_type();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol norm_weight_1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol norm_weight_2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol normalization();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol normalized();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol normalized_shape();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol nt_example();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol num_classes();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol num_generated();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol num_groups();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol num_head();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol num_heads();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol num_layers();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol num_samples();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol num_weights();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol numel();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol observer_on();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol offset();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol offset2bag();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol offsets();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol onesided();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol order();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol other();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol out();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol out0();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol out1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol out2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol out3();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol out4();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol out5();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol out6();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol out_dim();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol out_int32();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol outdim();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol output();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol output_mask();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol output_padding();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol output_scale();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol output_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol output_zero_point();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol p();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol packed();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol packed_hh();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol packed_ih();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol packed_weight();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol pad_mode();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol padded();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol padding();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol padding_idx();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol padding_mode();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol padding_value();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol params();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol path();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol per_row_fake_quant();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol per_sample_weights();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol periodic();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol philox_offset();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol philox_seed();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol pivot();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol pivots();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol plain_idx();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol plain_indices();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol pos_weight();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol posinf();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol prepend();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol primal();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol prob();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol proj_bias();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol proj_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol proj_weight();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol q();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol qkv();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol qkv_bias();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol qkv_weight();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol qtensor();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol quant_max();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol quant_min();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol quasi();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol query();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol r();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol random_samples();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol rank();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ratio();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol rcond();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol reduce();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol reduce_range();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol reduction();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol repeats();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol replacement();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol reserve();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol reserveSpace();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol reservedSpace();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol residuals();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol result();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol retain_graph();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol return_complex();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol return_counts();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol return_debug_mask();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol return_inverse();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol reverse();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol right();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol rounding_mode();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol row();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol rstd();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol rtol();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol running_max();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol running_mean();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol running_min();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol running_var();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol s();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol save_invstd();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol save_mean();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol save_var();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol save_var_transform();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol saved_g();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol saved_norms();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol saved_v();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scalar();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scalar1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scalar2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scalars();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scale();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scale_backoff_factor();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scale_factors();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scale_grad_by_freq();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scale_growth_factor();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scale_hh();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scale_ih();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scales();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scales_d();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scales_h();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol scales_w();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol sections();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol self();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol self_is_result();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol self_num_batch_dims();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol self_or_result();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol self_sizes();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol sequences();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol shape();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol shared();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol shifts();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol side();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol sigma();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol singular_values();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol sizes();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol sobolstate();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol solution();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol some();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol sorted_sequence();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol sorter();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol source();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol spacing();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol sparse();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol sparse_grad();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol split_size();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol split_sizes();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol src();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol stable();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol start();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol start_dim();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol state_steps();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol step();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol steps();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol storage_offset();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol sumdim();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol swap();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol symmetric_quant();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol tangent();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol target();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol target_lengths();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol targets();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol tau();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol tensor1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol tensor2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol tensor_indices_or_sections();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol tensors();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol tensors1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol test_element();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol test_elements();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol the_template();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol theta();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol tol();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol total();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol total_length();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol total_weight();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol train();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol training();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol transposed();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol type1();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol type2();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol unbiased();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol unitriangular();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol unpack_data();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol unpack_pivots();
-@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol unroll_dim();
-@Namespace("c10::attr")
@MemberGetter public static native @Const @ByRef Symbol unsafe(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol upper(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol upscale_factor(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol use_gelu(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol use_input_stats(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol v(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol value(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol vec(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol vec1(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol vec2(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol w_hh(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol w_ih(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight0(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight1(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight2(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight3(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight4(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight_arr(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight_buf(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight_decay(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight_g(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight_scale(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight_stride0(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weight_zero_point(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol weights(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol win_length(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol window(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol window_length(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol with_replacement(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol workspace(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol wrap(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol x(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol x1(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol x2(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol y(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol z(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol z_state(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef 
Symbol zero_infinity(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol zero_point(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol zero_point_hh(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol zero_point_ih(); -@Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol zero_points(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol Subgraph(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ReverseSubgraph(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol f_real_outputs(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol df_input_vjps(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol df_input_captured_inputs(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol df_input_captured_outputs(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol df_output_vjps(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol axes(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol symbolic_shape_inputs(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol allow_stack_outputs(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol striding_inputs_desc(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol striding_outputs_desc(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol broadcast(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol direction(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol ends(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol inplace(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol input_as_shape(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol is_zero(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol num_none(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol num_present(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol perm(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol starts(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol profiled_type(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol transA(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol transB(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol name(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol module(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol beg(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol idx(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol slot(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol kinds(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol types(); - @Namespace("c10::attr") @MemberGetter public static 
native @Const @ByRef Symbol keepdims(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol cache_id(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol new_axis(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol warn_id(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol output_layouts(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol allowzero(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol seen_none(); - @Namespace("c10::attr") @MemberGetter public static native @Const @ByRef Symbol overload_name(); -// #undef DEFINE_SYMBOL +@Namespace("c10::detail") public static native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Const @ByRef UniqueVoidPtr sp, @ByVal @Cast("std::nullptr_t*") PointerPointer arg1); +@Namespace("c10::detail") public static native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0, @Const @ByRef UniqueVoidPtr sp); +@Namespace("c10::detail") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@Const @ByRef UniqueVoidPtr sp, @ByVal @Cast("std::nullptr_t*") PointerPointer arg1); +@Namespace("c10::detail") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0, @Const @ByRef UniqueVoidPtr sp); + // namespace detail // namespace c10 -// Parsed from ATen/core/grad_mode.h +// Parsed from c10/core/Allocator.h // #pragma once -// #include -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../DataPtr.java -// Parsed from ATen/core/ATenGeneral.h -// #pragma once +// NB: Device is NOT tested for here; a CUDA nullptr is as much a nullptr as a +// CPU nullptr -// #include +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr dp, @ByVal @Cast("std::nullptr_t*") PointerPointer arg1); +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") @NoException(true) boolean equals(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0, @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr dp); +@Namespace("c10") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr dp, @ByVal @Cast("std::nullptr_t*") PointerPointer arg1); +@Namespace("c10") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0, @Cast({"", "c10::DataPtr&&"}) @StdMove DataPtr dp); +// Targeting ../Allocator.java -// Parsed from ATen/core/Dimname.h -// #pragma once +// This context is used to generate DataPtr which have arbitrary +// std::function deleters associated with them. In some user facing +// functions, we give a (user-friendly) interface for constructing +// tensors from external data which take an arbitrary std::function +// deleter. Grep for InefficientStdFunctionContext to find these +// occurrences. +// +// This context is inefficient because we have to do a dynamic +// allocation InefficientStdFunctionContext, on top of the dynamic +// allocation which is implied by std::function itself. 
-// #include -// #include -// #include -// #include +/** Set the allocator for DeviceType {@code t}. The passed in allocator pointer is + * expected to have static lifetime; this function does NOT take ownership + * of the raw pointer. (The reason for this is to prevent existing pointers + * to an allocator of a particular device from being invalidated when + * SetAllocator is called.) + * + * Also note that this is not thread-safe, and we assume this function will + * only be called during initialization. + * + * The 'priority' flag is introduced when we want to overwrite the default + * allocator, since the allocators are set statically. The default priority + * is 0, which means the lowest. Only higher or equal priority can overwrite + * existing ones. + */ +@Namespace("c10") public static native void SetAllocator(DeviceType t, Allocator alloc, @Cast("uint8_t") byte priority/*=0*/); +@Namespace("c10") public static native void SetAllocator(DeviceType t, Allocator alloc); +@Namespace("c10") public static native void SetAllocator(@Cast("c10::DeviceType") byte t, Allocator alloc, @Cast("uint8_t") byte priority/*=0*/); +@Namespace("c10") public static native void SetAllocator(@Cast("c10::DeviceType") byte t, Allocator alloc); +@Namespace("c10") public static native Allocator GetAllocator(DeviceType t); +@Namespace("c10") public static native Allocator GetAllocator(@Cast("c10::DeviceType") byte t); -@Namespace("at") public enum NameType { BASIC((byte)(0)), WILDCARD((byte)(1)); +// #define REGISTER_ALLOCATOR(t, f) +// namespace { +// static c10::AllocatorRegisterer g_allocator_d(f); +// } +// Targeting ../MemoryReportingInfoBase.java - public final byte value; - private NameType(byte v) { this.value = v; } - private NameType(NameType e) { this.value = e.value; } - public NameType intern() { for (NameType e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -// Targeting ../Dimname.java +@Namespace("c10") public static native @Cast("bool") boolean memoryProfilingEnabled(); +@Namespace("c10") public static native void reportMemoryUsageToProfiler( + Pointer ptr, + @Cast("int64_t") long alloc_size, + @Cast("size_t") long total_allocated, + @Cast("size_t") long total_reserved, + @ByVal Device device); -@Namespace("at") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Dimname dimname); +@Namespace("c10") public static native void reportOutOfMemoryToProfiler( + @Cast("int64_t") long alloc_size, + @Cast("size_t") long total_allocated, + @Cast("size_t") long total_reserved, + @ByVal Device device); -@Namespace("at") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Dimname lhs, @Const @ByRef Dimname rhs); + // namespace c10 -@Namespace("at") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef Dimname lhs, @Const @ByRef Dimname rhs); - // namespace at +// Parsed from c10/core/StorageImpl.h +// #pragma once -// Parsed from ATen/core/DimVector.h +// #include +// #include +// #include -// #pragma once -// #include +// #include +// Targeting ../StorageImpl.java -// Re-declaring 'DimVector' type and size inside 'at' namespace. -// This is done to avoid modifying every use into their 'c10' -// equivalent. 
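SetAllocator and GetAllocator come through as plain static natives on the generated org.bytedeco.pytorch.global.torch class, so the registration rules described above can be exercised directly from Java. A minimal sketch, assuming the DeviceType enum generated elsewhere in this same class and a loaded libtorch that has already registered its default CPU allocator:

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.global.torch;

public class AllocatorPeek {
    public static void main(String[] args) {
        // Look up the allocator currently registered for CPU tensors.
        Allocator cpu = torch.GetAllocator(torch.DeviceType.CPU);
        // Re-register it at the default priority 0; per the comment above,
        // only an equal or higher priority may overwrite an existing entry.
        torch.SetAllocator(torch.DeviceType.CPU, cpu, (byte) 0);
    }
}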
- // namespace at + // namespace c10 -// Parsed from ATen/core/Generator.h +// Parsed from c10/core/Storage.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include -// #include +// #include +// Targeting ../Storage.java -// For the record I don't think this is a correct pimpl idiom. -// Including Impl header in interface header defeats the purpose -// because you can't change Impl private members without forcing -// everything that included the interface to rebuild. -// Impl should be forward-declared in the interface header instead. -// #include -// Targeting ../Generator.java + // namespace c10 -/** - * Utility function to static cast input Generator* to - * the backend generator type (CPU/CUDAGeneratorImpl etc.) - */ -/** - * Utility function used in tensor implementations, which - * supplies the default generator to tensors, if an input generator - * is not supplied. The input Generator* is also static casted to - * the backend generator type (CPU/CUDAGeneratorImpl etc.) - */ +// Parsed from c10/core/CopyBytes.h -/** - * Helper function for checking the validity of new random generator - * state. Right now following conditions are checked: - * - * - The new state tensor must be a torch.ByteTensor - * - Data of the new state tensor must be contiguous - */ -@Namespace("at::detail") public static native void check_rng_state(@Const @ByRef TensorImpl new_state); +// #pragma once - // namespace detail +// #include +// Targeting ../CopyBytesFunction.java - // namespace at -// Parsed from ATen/core/Dict.h +// #define REGISTER_COPY_BYTES_FUNCTION(from, to, ...) +// namespace { +// static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( +// g_copy_function)(from, to, __VA_ARGS__); +// } -// #pragma once +/* + * WARNING: Implementations for this function are currently registered from + * ATen and caffe2, not yet from c10. Don't use this unless either ATen + * or caffe2 is also present. + * We can't move them yet, because the CUDA implementations aren't unified yet + * between ATen and caffe2. + * We're planning to move the implementations into c10/backend/xxx + * to make c10 self-contained again. + */ +@Namespace("c10") public static native void CopyBytes( + @Cast("size_t") long nbytes, + @Const Pointer src, + @ByVal Device src_device, + Pointer dst, + @ByVal Device dst_device, + @Cast("bool") boolean async); + // namespace c10 -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../DictKeyHash.java +// Parsed from c10/core/AutogradState.h +// #pragma once -// Targeting ../DictKeyEqualTo.java +// #include +// Targeting ../AutogradState.java + // namespace c10 -// Targeting ../GenericDictEntryRef.java +// Parsed from c10/core/GradMode.h -// Targeting ../GenericDictIterator.java +// #pragma once +// #include +// #include +// Targeting ../GradMode.java -// Targeting ../StringGenericListDict.java +// Targeting ../AutoGradMode.java -// Targeting ../GenericDict.java +// Targeting ../NoGradGuard.java -// GenericDict is how IValue stores dicts. It is, however, not part of the -// public API. Kernels should use Dicts with concrete Key, Value types instead -// (maybe except for some internal prim ops).
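GradMode, AutoGradMode, and NoGradGuard targeted above carry the C++ RAII idiom over to Java. A rough usage sketch, assuming the presets map the C++ statics to static natives on GradMode, and that closing the guard (JavaCPP's Pointer implements AutoCloseable) runs the native destructor and restores the previous thread-local mode:

import org.bytedeco.pytorch.GradMode;
import org.bytedeco.pytorch.NoGradGuard;

public class NoGradSketch {
    public static void main(String[] args) {
        // Grad mode is thread-local; the guard disables it for its lifetime.
        try (NoGradGuard guard = new NoGradGuard()) {
            System.out.println(GradMode.is_enabled()); // expected: false
        }
        // The previous grad-mode state is restored once the guard closes.
    }
}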
+// Targeting ../AutoFwGradMode.java + // namespace c10 -// #include // IWYU pragma: keep +// Parsed from c10/util/Registry.h +// #ifndef C10_UTIL_REGISTRY_H_ +// #define C10_UTIL_REGISTRY_H_ -// Parsed from ATen/core/List.h +/** + * Simple registry implementation that uses static variables to + * register object creators during program initialization time. + */ -// #pragma once +// NB: This Registry works poorly when you have other namespaces. +// Make all macro invocations from inside the at namespace. -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include // #include -// Targeting ../ListImpl.java - +// #include +// #include +@Namespace("c10") public static native @StdString BytePointer KeyStrRepr(@StdString BytePointer key); +@Namespace("c10") public static native @StdString String KeyStrRepr(@StdString String key); +@Namespace("c10") public enum RegistryPriority { + REGISTRY_FALLBACK(1), + REGISTRY_DEFAULT(2), + REGISTRY_PREFERRED(3); + public final int value; + private RegistryPriority(int v) { this.value = v; } + private RegistryPriority(RegistryPriority e) { this.value = e.value; } + public RegistryPriority intern() { for (RegistryPriority e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +/** + * \brief A template class that allows one to register classes by keys. + * + * The keys are usually a std::string specifying the name, but can be anything + * that can be used in a std::map. + * + * You should most likely not use the Registry class explicitly, but use the + * helper macros below to declare specific registries as well as registering + * objects. + */ +/** + * C10_DECLARE_TYPED_REGISTRY is a macro that expands to a function + * declaration, as well as creating a convenient typename for its corresponding + * registerer. + */ +// Note on C10_IMPORT and C10_EXPORT below: we need to explicitly mark DECLARE +// as import and DEFINE as export, because these registry macros will be used +// in downstream shared libraries as well, and one cannot use *_API - the API +// macro will be defined on a per-shared-library basis. Semantically, when one +// declares a typed registry it is always going to be IMPORT, and when one +// defines a registry (which should happen ONLY ONCE and ONLY IN SOURCE FILE), +// the instantiation unit is always going to be exported. +// +// The only unique condition is when in the same file one does DECLARE and +// DEFINE - in Windows compilers, this generates a warning that dllimport and +// dllexport are mixed, but the warning is fine and the linker will properly +// export the symbol. Same thing happens in the gflags flag declaration and +// definition cases. +// #define C10_DECLARE_TYPED_REGISTRY( +// RegistryName, SrcType, ObjectType, PtrType, ...) +// C10_IMPORT ::c10::Registry, ##__VA_ARGS__>* +// RegistryName(); +// typedef ::c10::Registerer, ##__VA_ARGS__> +// Registerer##RegistryName -// Targeting ../ListElementConstReferenceTraits.java +// #define C10_DEFINE_TYPED_REGISTRY( +// RegistryName, SrcType, ObjectType, PtrType, ...)
+// C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* +// RegistryName() { +// static ::c10::Registry, ##__VA_ARGS__>* +// registry = new ::c10:: +// Registry, ##__VA_ARGS__>(); +// return registry; +// } +// #define C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( +// RegistryName, SrcType, ObjectType, PtrType, ...) +// C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* +// RegistryName() { +// static ::c10::Registry, ##__VA_ARGS__>* +// registry = +// new ::c10::Registry, ##__VA_ARGS__>( +// false); +// return registry; +// } +// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated +// creator with a comma in its templated arguments. +// #define C10_REGISTER_TYPED_CREATOR(RegistryName, key, ...) +// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( +// key, RegistryName(), ##__VA_ARGS__); -// this wraps vector::iterator to make sure user code can't rely -// on it being the type of the underlying vector. - +// #define C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( +// RegistryName, key, priority, ...) +// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( +// key, priority, RegistryName(), ##__VA_ARGS__); -// Parsed from ATen/core/NamedTensor.h +// #define C10_REGISTER_TYPED_CLASS(RegistryName, key, ...) +// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( +// key, +// RegistryName(), +// Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, +// ::c10::demangle_type<__VA_ARGS__>()); -// #pragma once +// #define C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( +// RegistryName, key, priority, ...) +// static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( +// key, +// priority, +// RegistryName(), +// Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, +// ::c10::demangle_type<__VA_ARGS__>()); -// #include -// #include -// #include -// Targeting ../NamedTensorMeta.java +// C10_DECLARE_REGISTRY and C10_DEFINE_REGISTRY are hard-wired to use +// std::string as the key type, because that is the most common use case. +// #define C10_DECLARE_REGISTRY(RegistryName, ObjectType, ...) +// C10_DECLARE_TYPED_REGISTRY( +// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) +// #define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) +// C10_DEFINE_TYPED_REGISTRY( +// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) -// Targeting ../NamesMode.java +// #define C10_DEFINE_REGISTRY_WITHOUT_WARNING(RegistryName, ObjectType, ...) +// C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( +// RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) +// #define C10_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) +// C10_DECLARE_TYPED_REGISTRY( +// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) -// Targeting ../NoNamesGuard.java +// #define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) +// C10_DEFINE_TYPED_REGISTRY( +// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) +// #define C10_DEFINE_SHARED_REGISTRY_WITHOUT_WARNING( +// RegistryName, ObjectType, ...) +// C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( +// RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) +// C10_REGISTER_CREATOR and C10_REGISTER_CLASS are hard-wired to use std::string +// as the key +// type, because that is the most common use case. +// #define C10_REGISTER_CREATOR(RegistryName, key, ...)
+// C10_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__) +// #define C10_REGISTER_CREATOR_WITH_PRIORITY(RegistryName, key, priority, ...) +// C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( +// RegistryName, #key, priority, __VA_ARGS__) +// #define C10_REGISTER_CLASS(RegistryName, key, ...) +// C10_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__) +// #define C10_REGISTER_CLASS_WITH_PRIORITY(RegistryName, key, priority, ...) +// C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( +// RegistryName, #key, priority, __VA_ARGS__) -// Sets the names of `tensor` to be `names`. -@Namespace("at") public static native @Const @ByRef TensorBase internal_set_names_inplace(@Const @ByRef TensorBase tensor, @ByVal DimnameListOptional names); -@Namespace("at") public static native @Const @ByRef TensorBase internal_set_names_inplace(@Const @ByRef TensorBase tensor, @StdMove DimnameVector names, @Cast("bool") boolean validate_names); + // namespace c10 -@Namespace("at") @MemberGetter public static native @Cast("const size_t") long kMaxNamedTensorDim(); +// #endif // C10_UTIL_REGISTRY_H_ +// Parsed from c10/util/Flags.h -// Some helper functions on TensorImpl. Useful for working with names in TH. -// XXX: Ideally these would exist as methods on TensorImpl -@Namespace("at::impl") public static native void internal_set_names_inplace(TensorImpl impl, @ByVal DimnameListOptional names, @Cast("bool") boolean validate_names); -@Namespace("at::impl") public static native void internal_set_names_inplace(TensorImpl impl, @StdMove DimnameVector names, @Cast("bool") boolean validate_names); +// #ifndef C10_UTIL_FLAGS_H_ +// #define C10_UTIL_FLAGS_H_ +/* Commandline flags support for C10. + * + * This is a portable commandline flags tool for c10, so we can optionally + * choose to use gflags or a lightweight custom implementation if gflags is + * not possible on a certain platform. If you have gflags installed, setting the + * macro C10_USE_GFLAGS will seamlessly route everything to gflags. + * + * To define a flag foo of type bool that defaults to true, do the following in the + * *global* namespace: + * C10_DEFINE_bool(foo, true, "An example."); + * + * To use it in another .cc file, you can use C10_DECLARE_* as follows: + * C10_DECLARE_bool(foo); + * + * In both cases, you can then access the flag via FLAGS_foo. + * + * It is recommended that you build with gflags. To learn more about the flags + * usage, refer to the gflags page here: + * + * https://gflags.github.io/gflags/ + * + * Note about Python users / devs: gflags is initiated from a C++ function + * ParseCommandLineFlags, and is usually done in native binaries in the main + * function. As Python does not have a modifiable main function, it is usually + * difficult to change the flags after Python starts. Hence, it is recommended + * that one sets the default value of the flags to one that's acceptable in + * general - that will allow Python to run without wrong flags. + */ +// #include -// Returns true if the tensor's names exist and are not all 'None'. -// Returns false if the tensor's names don't exist (were not allocated), -// or if all names are 'None'. -// We treat not-allocated-names the same as allocated names that are all 'None'. -@Namespace("at::impl") public static native @Cast("bool") boolean has_names(@Const TensorImpl impl); +// #include +// #include +/** + * Sets the usage message when a commandline tool is called with "--help".
+ */ +@Namespace("c10") public static native void SetUsageMessage(@StdString BytePointer str); +@Namespace("c10") public static native void SetUsageMessage(@StdString String str); -// Returns the names of the tensor's dimensions. -// Unnamed tensors are treated as having 'None' in all dimension; this method -// would return a DimnameList of all 'None's for an unnamed tensor. -@Namespace("at::impl") public static native @ByVal DimnameArrayRef get_names(@Const TensorImpl impl); +/** + * Returns the usage message for the commandline tool set by SetUsageMessage. + */ +@Namespace("c10") public static native @Cast("const char*") BytePointer UsageMessage(); -// This is more of an implementation detail; one should use impl::get_names / -// Tensor::names() whenever possible because it provides a cleaner API. -// Returns the names of the tensor if they have been allocated; returns nullopt -// instead if the haven't been. The names of a tensor are not allocated if a -// tensor is constructed with names=None. -@Namespace("at::impl") public static native @ByVal DimnameListOptional get_opt_names(@Const TensorImpl impl); +/** + * Parses the commandline flags. + * + * This command parses all the commandline arguments passed in via pargc + * and argv. Once it is finished, pargc and argv will contain the remaining + * commandline args that c10 does not deal with. Note that following + * convention, argv[0] contains the binary name and is not parsed. + */ +@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(IntPointer pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); +@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(IntBuffer pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); +@Namespace("c10") public static native @Cast("bool") boolean ParseCommandLineFlags(int[] pargc, @Cast("char***") @ByPtrPtr PointerPointer pargv); - // namespace impl +/** + * Checks if the commandline flags have already been parsed. + */ +@Namespace("c10") public static native @Cast("bool") boolean CommandLineFlagsHasBeenParsed(); - // namespace at + // namespace c10 +//////////////////////////////////////////////////////////////////////////////// +// Below are gflags and non-gflags specific implementations. +// In general, they define the following macros for one to declare (use +// C10_DECLARE) or define (use C10_DEFINE) flags: +// C10_{DECLARE,DEFINE}_{int,int64,double,bool,string} +//////////////////////////////////////////////////////////////////////////////// -// Parsed from ATen/core/Reduction.h +// #ifdef C10_USE_GFLAGS -// #pragma once +//////////////////////////////////////////////////////////////////////////////// +// Begin gflags section: most functions are basically rerouted to gflags. +//////////////////////////////////////////////////////////////////////////////// +// #include -// NB: Keep this in sync with Reduction class in torch/nn/_reduction.py -// These constants control the reduction behavior of loss functions. -// Ideally, this would be a scoped enum, but jit doesn't support that -@Namespace("at::Reduction") public enum Reduction { - None(0), // Do not reduce - Mean(1), // (Possibly weighted) mean of losses - Sum(2), // Sum losses - END(3); +// C10 uses hidden visibility by default. However, in gflags, it only uses +// export on Windows platform (with dllexport) but not on linux/mac (with +// default visibility).
As a result, to ensure that we are always exporting +// global variables, we will redefine the GFLAGS_DLL_DEFINE_FLAG macro if we +// are building C10 as a shared library. +// This has to be done after the inclusion of gflags, because some early +// versions of gflags.h (e.g. 2.0 on ubuntu 14.04) directly define the +// macros, so we need to do the definition after gflags is done. +// #ifdef GFLAGS_DLL_DEFINE_FLAG +// #endif // GFLAGS_DLL_DEFINE_FLAG +// #ifdef GFLAGS_DLL_DECLARE_FLAG +// #endif // GFLAGS_DLL_DECLARE_FLAG +// #define GFLAGS_DLL_DEFINE_FLAG C10_EXPORT +// #define GFLAGS_DLL_DECLARE_FLAG C10_IMPORT - public final int value; - private Reduction(int v) { this.value = v; } - private Reduction(Reduction e) { this.value = e.value; } - public Reduction intern() { for (Reduction e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - // namespace Reduction - // namespace at +// gflags before 2.0 uses namespace google and after 2.1 uses namespace gflags. +// Using GFLAGS_GFLAGS_H_ to capture this change. +// #ifndef GFLAGS_GFLAGS_H_ +// #endif // GFLAGS_GFLAGS_H_ +// Motivation about the gflags wrapper: +// (1) We would need to make sure that the gflags version and the non-gflags +// version of C10 are going to expose the same flags abstraction. One should +// explicitly use FLAGS_flag_name to access the flags. +// (2) For flag names, it is recommended to start with c10_ to distinguish it +// from regular gflags flags. For example, do +// C10_DEFINE_bool(c10_my_flag, true, "An example"); +// to allow one to use FLAGS_c10_my_flag. +// (3) Gflags has a design issue: it does not properly expose the global flags +// if one builds the library with -fvisibility=hidden. The current gflags (as of +// Aug 2018) only deals with the Windows case using dllexport, and not the Linux +// counterparts. As a result, we will explicitly use C10_EXPORT to export the +// flags defined in C10. This is done via a global reference, so the flag +// itself is not duplicated - under the hood it is the same global gflags flag. +// #define C10_GFLAGS_DEF_WRAPPER(type, real_type, name, default_value, help_str) +// DEFINE_##type(name, default_value, help_str); -// Parsed from ATen/core/Scalar.h +// #define C10_DEFINE_int(name, default_value, help_str) +// C10_GFLAGS_DEF_WRAPPER(int32, gflags::int32, name, default_value, help_str) +// #define C10_DEFINE_int32(name, default_value, help_str) +// C10_DEFINE_int(name, default_value, help_str) +// #define C10_DEFINE_int64(name, default_value, help_str) +// C10_GFLAGS_DEF_WRAPPER(int64, gflags::int64, name, default_value, help_str) +// #define C10_DEFINE_double(name, default_value, help_str) +// C10_GFLAGS_DEF_WRAPPER(double, double, name, default_value, help_str) +// #define C10_DEFINE_bool(name, default_value, help_str) +// C10_GFLAGS_DEF_WRAPPER(bool, bool, name, default_value, help_str) +// #define C10_DEFINE_string(name, default_value, help_str) +// C10_GFLAGS_DEF_WRAPPER(string, ::fLS::clstring, name, default_value, help_str) -// #include +// DECLARE_typed_var should be used in header files and in the global namespace.
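The C10_DEFINE_*/C10_DECLARE_* machinery above is compile-time C++, but the parsing entry points bound earlier (SetUsageMessage, ParseCommandLineFlags, CommandLineFlagsHasBeenParsed) are callable from Java. A minimal sketch, assuming a plain JavaCPP PointerPointer is an acceptable argument for the char*** parameter as declared:

import org.bytedeco.javacpp.PointerPointer;
import org.bytedeco.pytorch.global.torch;

public class FlagsSketch {
    public static void main(String[] args) {
        torch.SetUsageMessage("sketch [c10 flags]");
        // Following convention, argv[0] is the binary name and is not parsed.
        int[] argc = { 1 };
        PointerPointer argv = new PointerPointer("sketch");
        boolean ok = torch.ParseCommandLineFlags(argc, argv);
        System.out.println(ok && torch.CommandLineFlagsHasBeenParsed());
    }
}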
+// #define C10_GFLAGS_DECLARE_WRAPPER(type, real_type, name) DECLARE_##type(name); +// #define C10_DECLARE_int(name) +// C10_GFLAGS_DECLARE_WRAPPER(int32, gflags::int32, name) +// #define C10_DECLARE_int32(name) C10_DECLARE_int(name) +// #define C10_DECLARE_int64(name) +// C10_GFLAGS_DECLARE_WRAPPER(int64, gflags::int64, name) +// #define C10_DECLARE_double(name) +// C10_GFLAGS_DECLARE_WRAPPER(double, double, name) +// #define C10_DECLARE_bool(name) C10_GFLAGS_DECLARE_WRAPPER(bool, bool, name) +// #define C10_DECLARE_string(name) +// C10_GFLAGS_DECLARE_WRAPPER(string, ::fLS::clstring, name) +// Targeting ../C10FlagParser.java -// Parsed from ATen/core/TensorAccessor.h -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// The PtrTraits argument to the TensorAccessor/GenericPackedTensorAccessor -// is used to enable the __restrict__ keyword/modifier for the data -// passed to cuda. -// #if defined(__CUDACC__) || defined(__HIPCC__) -// #endif + // namespace c10 -// TensorAccessorBase and TensorAccessor are used for both CPU and CUDA tensors. -// For CUDA tensors it is used in device code (only). This means that we restrict ourselves -// to functions and types available there (e.g. IntArrayRef isn't). +// The macros are defined outside the c10 namespace. In your code, you should +// write the C10_DEFINE_* and C10_DECLARE_* macros outside any namespace +// as well. -// The PtrTraits argument is only relevant to cuda to support `__restrict__` pointers. +// #define C10_DEFINE_typed_var(type, name, default_value, help_str) +// C10_EXPORT type FLAGS_##name = default_value; +// namespace c10 { +// namespace { +// class C10FlagParser_##name : public C10FlagParser { +// public: +// explicit C10FlagParser_##name(const std::string& content) { +// success_ = C10FlagParser::Parse(content, &FLAGS_##name); +// } +// }; +// } +// RegistererC10FlagsRegistry g_C10FlagsRegistry_##name( +// #name, +// C10FlagsRegistry(), +// RegistererC10FlagsRegistry::DefaultCreator, +// "(" #type ", default " #default_value ") " help_str); +// } -// The `TensorAccessor` is typically instantiated for CPU `Tensor`s using -// `Tensor.accessor()`. -// For CUDA `Tensor`s, `GenericPackedTensorAccessor` is used on the host and only -// indexing on the device uses `TensorAccessor`s. +// #define C10_DEFINE_int(name, default_value, help_str) +// C10_DEFINE_typed_var(int, name, default_value, help_str) +// #define C10_DEFINE_int32(name, default_value, help_str) +// C10_DEFINE_int(name, default_value, help_str) +// #define C10_DEFINE_int64(name, default_value, help_str) +// C10_DEFINE_typed_var(int64_t, name, default_value, help_str) +// #define C10_DEFINE_double(name, default_value, help_str) +// C10_DEFINE_typed_var(double, name, default_value, help_str) +// #define C10_DEFINE_bool(name, default_value, help_str) +// C10_DEFINE_typed_var(bool, name, default_value, help_str) +// #define C10_DEFINE_string(name, default_value, help_str) +// C10_DEFINE_typed_var(std::string, name, default_value, help_str) +// DECLARE_typed_var should be used in header files and in the global namespace. +// #define C10_DECLARE_typed_var(type, name) C10_IMPORT extern type FLAGS_##name -// GenericPackedTensorAccessorBase and GenericPackedTensorAccessor are used on for CUDA `Tensor`s on the host -// and as -// In contrast to `TensorAccessor`s, they copy the strides and sizes on instantiation (on the host) -// in order to transfer them on the device when calling kernels. 
-// On the device, indexing of multidimensional tensors gives to `TensorAccessor`s. -// Use RestrictPtrTraits as PtrTraits if you want the tensor's data pointer to be marked as __restrict__. -// Instantiation from data, sizes, strides is only needed on the host and std::copy isn't available -// on the device, so those functions are host only. +// #define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name) +// #define C10_DECLARE_int32(name) C10_DECLARE_int(name) +// #define C10_DECLARE_int64(name) C10_DECLARE_typed_var(int64_t, name) +// #define C10_DECLARE_double(name) C10_DECLARE_typed_var(double, name) +// #define C10_DECLARE_bool(name) C10_DECLARE_typed_var(bool, name) +// #define C10_DECLARE_string(name) C10_DECLARE_typed_var(std::string, name) +//////////////////////////////////////////////////////////////////////////////// +// End non-gflags section. +//////////////////////////////////////////////////////////////////////////////// -// Can't put this directly into the macro function args because of commas -// #define AT_X GenericPackedTensorAccessor +// #endif // C10_USE_GFLAGS -// Old name for `GenericPackedTensorAccessor` -// #undef AT_X - // namespace at +// #endif // C10_UTIL_FLAGS_H_ -// Parsed from ATen/core/TensorBase.h +// Parsed from c10/core/impl/LocalDispatchKeySet.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include -// #include - - - // namespace torch::autograd - -// Convert Tensor to TensorBase without any need to include Tensor.h -@Namespace("at") public static native @Const @ByRef TensorBase get_tensor_base(@Const @ByRef Tensor t); -@Namespace("at::impl") public static native @Cast("bool") boolean variable_excluded_from_dispatch(); - +// #include +// #include +// #include -// Targeting ../TensorBase.java +// TLS management for DispatchKeySet (the "local" DispatchKeySet(s)) +// +// This manages two thread-local DispatchKeySets: +// +// - The included type set, which adds a tensor type for consideration +// in dispatch. (For example, you might add Profiling to +// the included type set to turn on profiling on all tensor operations.) +// +// - The excluded type set, which disqualifies a tensor type from dispatch. +// (For example, after redispatching on variable, we disqualify +// Autograd so we don't attempt to handle variable again.) +// (Exclusion wins over inclusion.) +// +// NB: Originally, I implemented the excluded type set as storing the inverted +// set, but TLS is defined to be zero-initialized, so this doesn't actually work +// (if it's inverted, you want the set to be -1 initialized). +// Targeting ../PODLocalDispatchKeySet.java +// Targeting ../LocalDispatchKeySet.java +// thread_local variables cannot be C10_API on Windows. +// Inlining this seems to break AutoDispatchBelowAutograd on Android. +// #if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +@Namespace("c10::impl") public static native @ByVal LocalDispatchKeySet tls_local_dispatch_key_set(); +// #else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +// #endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +// Internal, use ThreadLocalStateGuard -// Helper creator for Tensor class which doesn't requires the users to pass -// in an intrusive_ptr instead it just converts the argument passed to -// requested intrusive_ptr type. 
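Given the thread-local include/exclude sets described above, the non-RAII tls_* accessors bound just below let Java code flip a key without an enclosing guard scope. A small sketch of the round trip, assuming DispatchKey.Python from the DispatchKey enum generated elsewhere in this same class:

import org.bytedeco.pytorch.global.torch;

public class TlsDispatchSketch {
    public static void main(String[] args) {
        // Save, set, and restore a key in this thread's "included" set.
        boolean was = torch.tls_is_dispatch_key_included(torch.DispatchKey.Python);
        torch.tls_set_dispatch_key_included(torch.DispatchKey.Python, true);
        // ... ops dispatched on this thread now also consider Python ...
        torch.tls_set_dispatch_key_included(torch.DispatchKey.Python, was);
    }
}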
+// Targeting ../IncludeDispatchKeyGuard.java - // namespace detail -@Namespace("at") public static native DispatchKey legacyExtractDispatchKey(@Const @ByRef TensorBase t); +// Targeting ../ForceDispatchKeyGuard.java -// Targeting ../MaybeOwnedTraits.java +// Non-RAII API for manipulating the thread-local dispatch state. +// Please prefer the RAII API. The non-RAII API may be useful when +// the included/excluded state of a given DispatchKey must span +// many calls from the Python to the C++, so you cannot conveniently +// use an RAII guard. +// +// Example use case: a Python context manager that includes a certain +// DispatchKey, to ensure ops running under the context manager dispatch +// through that DispatchKey's registered overrides. +// +// The non-RAII API is less efficient than the RAII guards because both the +// getter and setter will do a tls_getaddr lookup (the RAII struct only needs +// one!) + +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_excluded(DispatchKey x); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_excluded(@Cast("c10::DispatchKey") short x); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_excluded(DispatchKey x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_excluded(@Cast("c10::DispatchKey") short x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_included(DispatchKey x); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_key_included(@Cast("c10::DispatchKey") short x); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_included(DispatchKey x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native void tls_set_dispatch_key_included(@Cast("c10::DispatchKey") short x, @Cast("bool") boolean desired_state); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_keyset_excluded(@ByVal DispatchKeySet ks); +@Namespace("c10::impl") public static native @Cast("bool") boolean tls_is_dispatch_keyset_included(@ByVal DispatchKeySet ks); + // namespace impl // namespace c10 +// Parsed from c10/core/InferenceMode.h +// #pragma once +// #include +// #include +// #include +// #include +// Targeting ../InferenceMode.java - // namespace symint - // namespace at + // namespace c10 -// Parsed from ATen/core/TensorBody.h +// Parsed from c10/core/SymIntArrayRef.h // #pragma once -// #ifdef TORCH_ASSERT_NO_OPERATORS -// #error This change adds a dependency on native_functions.yaml, -// meaning the file will need to be re-compiled every time an operator -// is changed or added. Consider if your change would be better placed in -// another file, or if a more specific header might achieve the same goal. -// See NOTE: [Tensor vs. 
TensorBase] -// #endif - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include // #include -// #include -// #include // #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("c10") public static native @ByVal LongArrayRef asIntArrayRefUnchecked(@ByVal SymIntArrayRef ar); -// #include +@Namespace("c10") public static native @ByVal LongArrayRefOptional asIntArrayRefSlowOpt( + @ByVal SymIntArrayRef ar); -// Targeting ../DeprecatedTypeProperties.java - // namespace at - // namespace indexing - // namespace at +// #define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__) - // namespace torch::autograd -// Targeting ../Tensor.java +// Prefer using a more semantic constructor, like +// fromIntArrayRefKnownNonNegative +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal LongArrayRef array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefUnchecked(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal LongArrayRef array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefKnownNonNegative(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); -// Helper creator for Tensor class which doesn't requires the users to pass -// in an intrusive_ptr instead it just converts the argument passed to -// requested intrusive_ptr type. +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal LongArrayRef array_ref); +@Namespace("c10") public static native @ByVal SymIntArrayRef fromIntArrayRefSlow(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... array_ref); - // namespace detail + // namespace c10 - // namespace at -// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> () +// Parsed from c10/core/DefaultDtype.h +// #pragma once -// aten::set_data(Tensor(a!) 
self, Tensor new_data) -> () +// #include +// #include + // namespace caffe2 +@Namespace("c10") public static native void set_default_dtype(@ByVal TypeMeta dtype); +@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_dtype(); +@Namespace("c10") public static native ScalarType get_default_dtype_as_scalartype(); +@Namespace("c10") public static native @Const @ByVal TypeMeta get_default_complex_dtype(); + // namespace c10 -// aten::data(Tensor self) -> Tensor +// Parsed from c10/core/TensorOptions.h +// #pragma once -// aten::is_leaf(Tensor self) -> bool +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::output_nr(Tensor self) -> int +// #include +// #include +// #include +@Namespace("c10") public static native DispatchKey computeDispatchKey( + @ByVal ScalarTypeOptional dtype, + @ByVal LayoutOptional layout, + @ByVal DeviceOptional device); -// aten::_version(Tensor self) -> int +@Namespace("c10") public static native ScalarType dtype_or_default(@ByVal ScalarTypeOptional dtype); +@Namespace("c10") public static native @ByVal TypeMeta dtype_or_default( + @ByVal TypeMetaOptional dtype); -// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!) +@Namespace("c10") public static native Layout layout_or_default(@ByVal LayoutOptional layout); +@Namespace("c10") public static native @ByVal Device device_or_default(@ByVal DeviceOptional device); -// aten::retain_grad(Tensor(a!) self) -> () +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +@Namespace("c10") public static native @Cast("bool") boolean pinned_memory_or_default(@ByVal BoolOptional pinned_memory); +// Targeting ../TensorOptions.java -// aten::retains_grad(Tensor self) -> bool -// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a) +// We should aspire to fit in one machine-size word; but a size greater than two +// words is too much. (We are doing terribly on 32-bit archs, where we require +// three machine size words to store tensor options. Eek!) +/** Convenience function that returns a {@code TensorOptions} object with the {@code dtype} + * set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions dtype(@ByVal TypeMeta dtype); -// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!) +// legacy function to support ScalarType +@Namespace("c10") public static native @ByVal TensorOptions dtype(ScalarType dtype); +/** Convenience function that returns a {@code TensorOptions} object with the {@code layout} + * set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions layout(Layout layout); +@Namespace("c10") public static native @ByVal TensorOptions layout(@Cast("c10::Layout") byte layout); -// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a) +/** Convenience function that returns a {@code TensorOptions} object with the {@code device} + * set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions device(@ByVal Device device); +/** Convenience function that returns a {@code TensorOptions} object with the + * {@code device} set to CUDA and the {@code device_index} set to the given one. 
*/ +@Namespace("c10") public static native @ByVal TensorOptions device_index(short device_index); -// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a) +/** Convenience function that returns a {@code TensorOptions} object with the + * {@code requires_grad} set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions requires_grad(@Cast("bool") boolean requires_grad/*=true*/); +/** Convenience function that returns a {@code TensorOptions} object with the + * {@code memory_format} set to the given one. */ +@Namespace("c10") public static native @ByVal TensorOptions memory_format(MemoryFormat memory_format); +@Namespace("c10") public static native @ByVal TensorOptions memory_format(@Cast("c10::MemoryFormat") byte memory_format); -// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a) +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + @Const @ByRef TensorOptions options); +@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByVal TensorOptions options); -// aten::align_as(Tensor self, Tensor other) -> Tensor +// This is intended to be a centralized location by which we can determine +// what an appropriate DispatchKey for a tensor is. +@Namespace("c10") public static native Layout dispatchKeyToLayout(DispatchKey dispatch_key); +@Namespace("c10") public static native @Cast("c10::Layout") byte dispatchKeyToLayout(@Cast("c10::DispatchKey") short dispatch_key); -// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a) +@Namespace("c10") public static native DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key); +@Namespace("c10") public static native @Cast("c10::DeviceType") byte dispatchKeyToDeviceType(@Cast("c10::DispatchKey") short dispatch_key); +@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key); +@Namespace("c10") public static native @ByVal TensorOptions dispatchKeyToTensorOptions(@Cast("c10::DispatchKey") short dispatch_key); +@Namespace("c10::detail") public static native @Cast("bool") boolean backend_supports_empty_operator(@Const @ByVal TensorOptions options); -// aten::abs(Tensor self) -> Tensor + // namespace detail + // namespace c10 -// aten::abs_(Tensor(a!) self) -> Tensor(a!) +// Parsed from c10/core/WrapDimMinimal.h -// aten::absolute(Tensor self) -> Tensor +// #pragma once +// #include +// #include +// This template can only be specialized at int64_t and c10::SymInt; +// you'll get linker errors otherwise + // namespace detail -// aten::absolute_(Tensor(a!) self) -> Tensor(a!) +@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( + @Cast("int64_t") long dim, + @Cast("int64_t") long dim_post_expr, + @Cast("bool") boolean wrap_scalar/*=true*/); +@Namespace("c10") public static native @Cast("int64_t") long maybe_wrap_dim( + @Cast("int64_t") long dim, + @Cast("int64_t") long dim_post_expr); +@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( + @ByVal SymInt dim, + @ByVal SymInt dim_post_expr, + @Cast("bool") boolean wrap_scalar/*=true*/); +@Namespace("c10") public static native @ByVal SymInt maybe_wrap_dim( + @ByVal SymInt dim, + @ByVal SymInt dim_post_expr); -// aten::angle(Tensor self) -> Tensor + // namespace c10 -// aten::sgn(Tensor self) -> Tensor +// Parsed from c10/core/impl/HermeticPyObjectTLS.h +// #pragma once -// aten::sgn_(Tensor(a!) 
self) -> Tensor(a!) +// #include +// #include +// Targeting ../HermeticPyObjectTLS.java -// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor + // namespace impl + // namespace c10 -// aten::_conj(Tensor(a) self) -> Tensor(a) +// Parsed from c10/core/impl/PyInterpreter.h -// aten::conj(Tensor(a) self) -> Tensor(a) +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::_conj_physical(Tensor self) -> Tensor +// Forward declarations + // namespace c10 + // namespace torch -// aten::conj_physical(Tensor self) -> Tensor +// Actual implementation +// Targeting ../PyInterpreterVTable.java -// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!) +// Targeting ../PyInterpreter.java -// aten::resolve_conj(Tensor(a) self) -> Tensor(a) +// PyInterpreterStatus describes what the state of its interpreter tag +// is, relative to the thread currently holding the GIL. +@Namespace("c10::impl") public enum PyInterpreterStatus { + // We just allocated the Tensor, it hasn't escaped to other threads, + // we know that it definitely hasn't been tagged to be associated + // with an interpreter. + DEFINITELY_UNINITIALIZED(0), + // We queried the interpreter field and it looked uninitialized. But + // another thread may have raced with us to tag it with some other + // interpreter id. So we will have to do a CEX to make sure we can + // actually nab it. + MAYBE_UNINITIALIZED(1), + // We queried the interpreter field and it was tagged to belong to us. + // This means we have sole write access (as we hold the GIL for this + // interpreter) + TAGGED_BY_US(2), + // Someone else tagged this. We can't use this TensorImpl from Python. + TAGGED_BY_OTHER(3); -// aten::resolve_neg(Tensor(a) self) -> Tensor(a) + public final int value; + private PyInterpreterStatus(int v) { this.value = v; } + private PyInterpreterStatus(PyInterpreterStatus e) { this.value = e.value; } + public PyInterpreterStatus intern() { for (PyInterpreterStatus e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + // namespace impl + // namespace c10 -// aten::_neg_view(Tensor(a) self) -> Tensor(a) +// Parsed from c10/core/impl/PyObjectSlot.h -// aten::acos(Tensor self) -> Tensor +// #pragma once +// #include +// #include +// #include +// #include -// aten::acos_(Tensor(a!) self) -> Tensor(a!) +// #include + // namespace impl + // namespace c10 -// aten::arccos(Tensor self) -> Tensor +// Parsed from c10/core/impl/SizesAndStrides.h -// aten::arccos_(Tensor(a!) self) -> Tensor(a!) +// #pragma once +// #include +// #include -// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +// #include +// #include +// #include +public static final int C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE = 5; +// Targeting ../SizesAndStrides.java -// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) -// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + // namespace impl + // namespace c10 -// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) - +// Parsed from c10/util/DimVector.h -// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor +// #pragma once +// #include +// #include +// #include +// #include -// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) 
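The TensorOptions and WrapDimMinimal helpers bound earlier in this hunk are plain static methods on the generated global class. A minimal sketch of calling them, assuming the usual presets entry point org.bytedeco.pytorch.global.torch (everything else below is declared above):

import org.bytedeco.pytorch.TensorOptions;
import org.bytedeco.pytorch.global.torch;
import static org.bytedeco.pytorch.global.torch.*;

public class TensorOptionsSketch {
    public static void main(String[] args) {
        // Each c10 convenience function returns a fresh TensorOptions
        // with the corresponding field set:
        TensorOptions floatOpts = dtype(ScalarType.Float);
        TensorOptions gradOpts  = requires_grad(true);
        TensorOptions chanLast  = memory_format(MemoryFormat.ChannelsLast);
        // Negative dimensions wrap around: -1 addresses the last of 4 dims.
        long last = maybe_wrap_dim(-1, 4);  // == 3
        System.out.println(torch.toString(floatOpts).getString() + " / dim " + last);
    }
}

In C++ these options would usually be chained through TensorOptions member calls; the sketch sticks to the free functions this hunk demonstrably binds.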
+@Namespace("c10") @MemberGetter public static native @Cast("const size_t") long kDimVectorStaticSize(); +/** A container for sizes or strides */ -// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + // namespace c10 -// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) +// Parsed from c10/util/Logging.h +// #ifndef C10_UTIL_LOGGING_H_ +// #define C10_UTIL_LOGGING_H_ -// aten::_is_all_true(Tensor self) -> Tensor +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::_is_any_true(Tensor self) -> Tensor +// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off +// logging at compile time so no logging message below that level is produced +// at all. The value should be between INT_MIN and CAFFE_FATAL. +// #ifndef CAFFE2_LOG_THRESHOLD +// If we have not defined the compile time log threshold, we keep all the +// log cases. +public static native @MemberGetter int CAFFE2_LOG_THRESHOLD(); +public static final int CAFFE2_LOG_THRESHOLD = CAFFE2_LOG_THRESHOLD(); +// #endif // CAFFE2_LOG_THRESHOLD +// Below are different implementations for glog and non-glog cases. +// #ifdef C10_USE_GLOG +// #include +// #else // !C10_USE_GLOG +// #include +// #endif // C10_USE_GLOG -// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor -// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor +// Some versions of GLOG support less-spammy version of LOG_EVERY_MS. If it's +// not available - just short-circuit to the always working one one. +// We define the C10_ name to avoid confusing other files +// #ifdef LOG_EVERY_MS +// #define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms) +// #else +// #define C10_LOG_EVERY_MS(severity, ms) LOG(severity) +// #endif -// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool +// Same for LOG_FIRST_N +// #ifdef LOG_FIRST_N +// #define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n) +// #else +// #define C10_LOG_FIRST_N(severity, n) LOG(severity) +// #endif +// Same for LOG_EVERY_N +// #ifdef LOG_EVERY_N +// #define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n) +// #else +// #define C10_LOG_EVERY_N(severity, n) LOG(severity) +// #endif -// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor +// Functions that we use for initialization. 
+@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") PointerPointer argv); +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntPointer argc, @Cast("char**") @ByPtrPtr BytePointer argv); +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(IntBuffer argc, @Cast("char**") @ByPtrPtr ByteBuffer argv); +@Namespace("c10") public static native @Cast("bool") boolean InitCaffeLogging(int[] argc, @Cast("char**") @ByPtrPtr byte[] argv); +@Namespace("c10") public static native void UpdateLoggingLevelsFromFlags(); +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @StdString String msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @StdString String msg); -// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @ByVal CompileTimeEmptyString arg3); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @StdString BytePointer msg); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @StdString String msg, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @StdString String msg); -// aten::argmax(Tensor self, int? 
dim=None, bool keepdim=False) -> Tensor +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + @Cast("const char*") BytePointer file, + int line, + @Cast("const char*") BytePointer condition, + @ByVal CompileTimeEmptyString arg3); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3, + @Const Pointer caller/*=nullptr*/); +@Namespace("c10") public static native void ThrowEnforceFiniteNotMet( + String file, + int line, + String condition, + @ByVal CompileTimeEmptyString arg3); +@Namespace("c10") public static native @Cast("const bool") boolean IsUsingGoogleLogging(); -// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor +/** + * A utility to allow one to show log info to stderr after the program starts. + * + * This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level + * to smaller than INFO. You are recommended to only use this in a few sparse + * cases, such as when you want to write a tutorial or something. Normally, use + * the commandline flags to set the log level. + */ +@Namespace("c10") public static native void ShowLogInfoToStderr(); +@Namespace("c10") public static native void SetStackTraceFetcher(@ByVal StringSupplier fetcher); -// aten::acosh(Tensor self) -> Tensor +// #define CAFFE_ENFORCE(condition, ...) +// do { +// if (C10_UNLIKELY(!(condition))) { +// ::c10::ThrowEnforceNotMet( +// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); +// } +// } while (false) +// #define CAFFE_ENFORCE_FINITE(condition, ...) +// do { +// if (C10_UNLIKELY(!(condition))) { +// ::c10::ThrowEnforceFiniteNotMet( +// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); +// } +// } while (false) -// aten::acosh_(Tensor(a!) self) -> Tensor(a!) +// #define CAFFE_ENFORCE_WITH_CALLER(condition, ...) +// do { +// if (C10_UNLIKELY(!(condition))) { +// ::c10::ThrowEnforceNotMet( +// __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); +// } +// } while (false) +// #define CAFFE_THROW(...) +// ::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__)) -// aten::arccosh(Tensor self) -> Tensor +/** + * Rich logging messages + * + * CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that + * capture input argument values and add it to the exception message. E.g. + * {@code CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")} + * would evaluate both foo and bar only once and if the results are not equal - + * include them in the exception message. + * + * Some of the basic checker functions like Equals or Greater are already + * defined below. Other header might define customized checkers by adding + * functions to caffe2::enforce_detail namespace. For example: + * + * namespace caffe2 { namespace enforce_detail { + * inline EnforceFailMessage IsVector(const vector& shape) { + * if (shape.size() == 1) { return EnforceOK(); } + * return c10::str("Shape ", shape, " is not a vector"); + * } + * }} + * + * With further usages like {@code CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))} + * + * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided + * too. 
Please use them instead of TORCH_CHECK_EQ and friends for failures in + * user-provided input. + */ +// #define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) +// ::c10::enforce_detail::enforceThatImpl( +// op, lhs, rhs, __FILE__, __LINE__, expr, nullptr, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) +// ::c10::enforce_detail::enforceThatImpl( +// op, (lhs), (rhs), __FILE__, __LINE__, expr, this, ##__VA_ARGS__) -// aten::arccosh_(Tensor(a!) self) -> Tensor(a!) + // namespace enforce_detail +// #define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) +// CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__) -// aten::asinh(Tensor self) -> Tensor +// #define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) +// CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_EQ(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::equal_to(), ==, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_NE(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::not_equal_to(), !=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LE(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::less_equal(), <=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LT(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::less(), <, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GE(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::greater_equal(), >=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GT(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP(std::greater(), >, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) +// CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER( +// cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::equal_to(), ==, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::not_equal_to(), !=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::less_equal(), <=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less(), <, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::greater_equal(), >=, x, y, ##__VA_ARGS__) +// #define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) +// CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( +// std::greater(), >, x, y, ##__VA_ARGS__) -// aten::asinh_(Tensor(a!) self) -> Tensor(a!) +/** + * Very lightweight logging for the first time API usage. It's beneficial for + * tracking of individual functionality usage in larger applications. + * + * In order to ensure light-weightedness of logging, we utilize static variable + * trick - LogAPIUsage will be invoked only once and further invocations will + * just do an atomic check. + * + * Example: + * // Logs caller info with an arbitrary text event, if there is a usage. + * C10_LOG_API_USAGE_ONCE("my_api"); + */ +// #define C10_LOG_API_USAGE_ONCE(...) 
+// C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = +// ::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__); +// API usage logging capabilities +@Namespace("c10") public static native void SetAPIUsageLogger(@ByVal StringConsumer logger); +@Namespace("c10") public static native void LogAPIUsage(@StdString BytePointer context); +@Namespace("c10") public static native void LogAPIUsage(@StdString String context); +// Targeting ../DDPLoggingData.java -// aten::arcsinh(Tensor self) -> Tensor -// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!) +@Namespace("c10") public static native void SetPyTorchDDPUsageLogger( + @ByVal DDPLogger logger); +@Namespace("c10") public static native void LogPyTorchDDPUsage(@Const @ByRef DDPLoggingData ddpData); +// Return value is needed to do the static variable initialization trick +@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString BytePointer context); +@Namespace("c10::detail") public static native @Cast("bool") boolean LogAPIUsageFakeReturn(@StdString String context); + // namespace detail +// Initializes the c10 logger. +@Namespace("c10") public static native void initLogging(); -// aten::atanh(Tensor self) -> Tensor + // namespace c10 +// #endif // C10_UTIL_LOGGING_H_ -// aten::atanh_(Tensor(a!) self) -> Tensor(a!) +// Parsed from c10/util/accumulate.h -// aten::arctanh(Tensor self) -> Tensor +// Copyright 2004-present Facebook. All Rights Reserved. +// #pragma once -// aten::arctanh_(Tensor(a!) self) -> Tensor(a!) +// #include +// #include +// #include +// #include -// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +/** Sum of a list of integers; accumulates into the int64_t datatype */ +/** Sum of integer elements referred to by iterators; accumulates into the + * int64_t datatype */ -// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +/** Product of a list of integers; accumulates into the int64_t datatype */ +/** Product of integer elements referred to by iterators; accumulates into the + * int64_t datatype */ -// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +/** Return product of all dimensions starting from k + * Returns 1 if k>=dims.size() */ +/** Product of all dims up to k (not including dims[k]) + * Throws an error if k>dims.size() */ -// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +/** Product of all dims between k and l (including dims[k] and excluding + * dims[l]) k and l may be supplied in either order */ + // namespace c10 -// aten::asin(Tensor self) -> Tensor +// Parsed from c10/util/safe_numerics.h -// aten::asin_(Tensor(a!) self) -> Tensor(a!) 
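The c10 logging entry points bound just above can be exercised directly; a minimal sketch, again assuming the generated global class org.bytedeco.pytorch.global.torch (only functions declared in this hunk are used):

import static org.bytedeco.pytorch.global.torch.*;

public class LoggingSketch {
    public static void main(String[] args) {
        initLogging();               // initialize the c10 logger
        ShowLogInfoToStderr();       // tutorial-style: show INFO logs on stderr
        LogAPIUsage("my_app.init");  // one-shot API-usage event, logged once
    }
}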
+// #pragma once +// #include +// #include +// #include +// #include +// #include -// aten::arcsin(Tensor self) -> Tensor +// GCC has __builtin_mul_overflow from before it supported __has_builtin +// #ifdef _MSC_VER +// #define C10_HAS_BUILTIN_OVERFLOW() (0) +// #include +// #include +// #else +// #define C10_HAS_BUILTIN_OVERFLOW() (1) +// #endif +@Namespace("c10") public static native @Cast("bool") boolean add_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongPointer out); +@Namespace("c10") public static native @Cast("bool") boolean add_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongBuffer out); +@Namespace("c10") public static native @Cast("bool") boolean add_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") long[] out); -// aten::arcsin_(Tensor(a!) self) -> Tensor(a!) +@Namespace("c10") public static native @Cast("bool") boolean mul_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongPointer out); +@Namespace("c10") public static native @Cast("bool") boolean mul_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") LongBuffer out); +@Namespace("c10") public static native @Cast("bool") boolean mul_overflows(@Cast("uint64_t") long a, @Cast("uint64_t") long b, @Cast("uint64_t*") long[] out); + // namespace c10 -// aten::atan(Tensor self) -> Tensor +// Parsed from c10/core/TensorImpl.h -// aten::atan_(Tensor(a!) self) -> Tensor(a!) +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::arctan(Tensor self) -> Tensor +// #include +// #include +// #include +// #include +// #include +// #include +// A global boolean variable to control whether we free memory when a Tensor +// is shrunk to a smaller size. As a result, a Tensor is always going to +// keep the memory allocated for the maximum capacity it has been reshaped to so far. +// +// This parameter is respected by "upper-case" methods which call Resize() +// (e.g., CopyFrom, ResizeLike); it is NOT respected by Tensor::resize_ +// or ShrinkTo, both of which guarantee never to free memory. -// aten::arctan_(Tensor(a!) self) -> Tensor(a!) +// Since we can have high variance in blob memory allocated across different +// inputs in the same run, we will shrink the blob only if the memory gain +// is larger than this flag in bytes. This only applies to functions which +// respect caffe2_keep_on_shrink. -// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +// #endif + // namespace at + // namespace c10 -// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) +/** + * A utility function to convert vector<int> to vector<int64_t>. + */ +@Namespace("c10") public static native @ByVal @Cast("std::vector*") LongVector ToVectorint64_t(@Const @ByRef IntArrayRef src); +/** + * Return product of all dimensions starting from k + */ +@Namespace("c10") public static native @Cast("int64_t") long size_from_dim_(int k, @ByVal LongArrayRef dims); +@Namespace("c10") public static native @Cast("int64_t") long size_from_dim_(int k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long...
dims); -// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor +// Product of all dims up to k (not including dims[k]) +@Namespace("c10") public static native @Cast("int64_t") long size_to_dim_(int k, @ByVal LongArrayRef dims); +@Namespace("c10") public static native @Cast("int64_t") long size_to_dim_(int k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// Product of all dims between k and l (not including dims[k] and dims[l]) +@Namespace("c10") public static native @Cast("int64_t") long size_between_dim_(int k, int l, @ByVal LongArrayRef dims); +@Namespace("c10") public static native @Cast("int64_t") long size_between_dim_(int k, int l, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!) +// Wrap around axis_index if it is negative, s.t., -1 is the last dim +@Namespace("c10") public static native int canonical_axis_index_(int axis_index, int ndims); +// Targeting ../PlacementDtor.java -// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!) +// Targeting ../PlacementDeleteContext.java -// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor +// Targeting ../AutogradMetaInterface.java -// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor +// Targeting ../AutogradMetaFactory.java -// aten::bitwise_not(Tensor self) -> Tensor +@Namespace("c10::impl") public static native void SetAutogradMetaFactory(AutogradMetaFactory factory); +@Namespace("c10::impl") public static native AutogradMetaFactory GetAutogradMetaFactory(); +// Targeting ../AutogradMetaFactoryRegisterer.java -// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!) -// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor +// Targeting ../NamedTensorMetaInterface.java -// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor +// For ease of copy pasting +// #if 0 +// #endif +// Targeting ../VariableVersion.java -// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// Forward declaration of TensorImpl needed for forward declaration of +// C10_TensorImpl_Size_Check_Dummy_Class -// aten::logical_not(Tensor self) -> Tensor +// Forward declaration needed because TensorImpl needs to be friends with +// C10_TensorImpl_Size_Check_Dummy_Class in order to check the size +// of its private fields. +/** + * NOTE: Some TensorImpl methods are small and not overridden in the + * PyTorch codebase itself, but may theoretically need to be + * overridden by third-party TensorImpl subclasses. This macro allows + * users that need maximum performance and don't need these extension + * points to disable them with a build-time flag. (In particular, + * XLA's XLATensorImpl currently overrides these methods, so we can't + * enable this flag by default.) + */ +// #ifdef C10_DISABLE_TENSORIMPL_EXTENSIBILITY +// #define TENSORIMPL_MAYBE_VIRTUAL +// #else +// #define TENSORIMPL_MAYBE_VIRTUAL virtual +// Targeting ../TensorImpl.java -// aten::logical_not_(Tensor(a!) self) -> Tensor(a!) -// aten::logical_xor(Tensor self, Tensor other) -> Tensor +// Note [TensorImpl size constraints] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Changed the size of TensorImpl? If the size went down, good for +// you! Adjust the documentation below and the expected size. +// Did it go up? Read on... 
+// +// Struct size matters. In some production systems at Facebook, we have +// 400M live tensors during a training run. Do the math: every 64-bit +// word you add to Tensor is an extra 3.2 gigabytes in RAM. +// +// If you are a Facebook employee, you can check if the run in question +// has tipped you over the point using the command here: +// https://fburl.com/q5enpv98 +// +// For reference, we OOMed at 160 bytes (20 words) per TensorImpl. +// This is not counting overhead from strides out-of-line allocation and +// StorageImpl space and this is from before we inlined sizes and strides +// directly into TensorImpl as SmallVectors. +// +// Our memory usage on 32-bit systems is suboptimal, but we're not checking +// for it at the moment (to help avoid rage inducing cycles when the +// 32-bit number is wrong). +// +// Current breakdown: +// +// vtable pointer +// strong refcount TODO: pack these into one word +// weak refcount +// storage pointer +// autograd metadata pointer +// named tensor metadata pointer +// version counter pointer +// PyObjectSlot +// SizesAndStrides size/pointer +// SizesAndStrides sizes (pre-allocated 0) +// SizesAndStrides sizes (pre-allocated 1) +// SizesAndStrides sizes (pre-allocated 2) +// SizesAndStrides sizes (pre-allocated 3) +// SizesAndStrides sizes (pre-allocated 4) +// SizesAndStrides strides (pre-allocated 0) +// SizesAndStrides strides (pre-allocated 1) +// SizesAndStrides strides (pre-allocated 2) +// SizesAndStrides strides (pre-allocated 3) +// SizesAndStrides strides (pre-allocated 4) +// storage offset +// numel +// data type, device, is_contiguous, storage_access_should_throw_, bitfields +// DispatchKeySet +// +// Various preprocessor macros we use to check that the +// TensorImpl size hasn't changed unexpectedly. We undef +// these later. +// #ifndef __NVCC__ +public static final int C10_NVCC = 0; +// #else +// #endif -// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) +// #ifndef __CUDA_VER_MAJOR__ +public static final int C10_CUDA_VERSION_MAJOR = 0; +// #else +// #endif +// #ifndef CUDA_VERSION +public static final int C10_CUDA_VERSION = 0; +// #else +// #endif -// aten::logical_and(Tensor self, Tensor other) -> Tensor +// #ifndef __clang_major__ +public static final int C10_CLANG_MAJOR_VERSION = 0; +// #else +// #endif +// #ifndef __GNUC__ +public static final int C10_GCC_VERSION = 0; +// #else +// #endif -// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!) - - -// aten::logical_or(Tensor self, Tensor other) -> Tensor - - -// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!) - - -// aten::bmm(Tensor self, Tensor mat2) -> Tensor - - -// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) - - -// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) - - -// aten::ceil(Tensor self) -> Tensor - - -// aten::ceil_(Tensor(a!) self) -> Tensor(a!) 
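For reference, the caffe2-era size helpers bound in the TensorImpl.h hunk above reduce runs of dimensions to a single product; a small worked sketch using the long... overloads declared there (global-class import as before):

import static org.bytedeco.pytorch.global.torch.*;

public class SizeHelperSketch {
    public static void main(String[] args) {
        long[] dims = {2, 3, 4};
        long tail = size_from_dim_(1, dims);        // 3*4 == 12, product from dim 1 on
        long head = size_to_dim_(2, dims);          // 2*3 == 6, product of dims before dim 2
        long mid  = size_between_dim_(0, 2, dims);  // 3, product strictly between dims 0 and 2
        int  axis = canonical_axis_index_(-1, 3);   // 2, -1 wraps to the last of 3 axes
        System.out.printf("%d %d %d %d%n", tail, head, mid, axis);
    }
}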
- - -// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[] - - -// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[] - - -// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] - - -// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] - - -// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] - - -// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] - - -// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[] - - -// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor - - -// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor - - -// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) - - -// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) - - -// aten::clamp_max(Tensor self, Scalar max) -> Tensor - - -// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor - - -// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) - - -// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!) - - -// aten::clamp_min(Tensor self, Scalar min) -> Tensor - - -// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor - - -// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) - - -// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) - - -// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor - - -// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor - - -// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) - - -// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) - - -// aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a) - - -// aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) - - -// aten::cos(Tensor self) -> Tensor - - -// aten::cos_(Tensor(a!) self) -> Tensor(a!) - - -// aten::cosh(Tensor self) -> Tensor - - -// aten::cosh_(Tensor(a!) self) -> Tensor(a!) - - -// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor - - -// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor - - -// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor - - -// aten::corrcoef(Tensor self) -> Tensor - - -// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) - - -// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) - - -// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) - - -// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) - - -// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor - - -// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) - - -// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor - - -// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) - - -// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor - - -// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) - - -// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor - +// #ifndef __GNUC_MINOR__ +public static final int C10_GCC_VERSION_MINOR = 0; +// #else +// #endif -// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) +// We use a templatized class to both contain the logic of checking the sizes +// as well as to provide compile-time information that might be useful in +// figuring out why sizes may have changed. +// All the compile time information is given by the template fields that are +// always printed by the compiler when the static_assert fails. +// We use a class to encapsulate size-checking logic with +// templates to capture sizes and flags. We call this within +// a static assert to prove there is no run-time behaviour. +// Since the methods we call return either true or fail their +// own static_asserts, we should never see the error messages +// below. We have to provide it though for C++ < 17. -// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor +// Clean up after ourselves +// #undef C10_NVCC +// #undef C10_CUDA_VERSION_MAJOR +// #undef C10_CUDA_VERSION +// #undef C10_CLANG_MAJOR_VERSION +// #undef C10_GCC_VERSION +// #undef C10_GCC_VERSION_MINOR + // namespace c10 -// aten::diagflat(Tensor self, int offset=0) -> Tensor -// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) +// Parsed from c10/core/UndefinedTensorImpl.h +// #pragma once -// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) +// #include +// Targeting ../UndefinedTensorImpl.java + // namespace c10 -// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!) +// Parsed from ATen/core/CheckMemoryFormat.h -// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor +// #include +@Namespace("c10::impl") public static native @ByVal MemoryFormatOptional check_tensor_options_and_extract_memory_format( + @Const @ByRef TensorOptions options, + @ByVal MemoryFormatOptional memory_format); -// aten::div.Tensor(Tensor self, Tensor other) -> Tensor + // namespace impl namespace c10 -// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor +// Parsed from c10/core/GeneratorImpl.h +// #pragma once -// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::div.Scalar(Tensor self, Scalar other) -> Tensor +/** + * Note [Generator] + * ~~~~~~~~~~~~~~~~ + * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm + * to generate a seemingly random sequence of numbers, that may later be used + * in creating a random distribution. Such an engine almost always maintains a + * state and requires a seed to start off the creation of random numbers. + * Oftentimes, users have found it beneficial to be able to explicitly create, + * retain, and destroy PRNG states and also be able to have control over the + * seed value. + * + * A Generator in ATen gives users the ability to read, write and modify a PRNG + * engine. For instance, it does so by letting users seed a PRNG engine, fork + * the state of the engine, etc. + * + * By default, there is one generator per device, and a device's generator is + * lazily created.
A user can use the torch.Generator() api to create their own + * generator. Currently torch.Generator() can only create a CPUGeneratorImpl. + */ +/** + * Note [Acquire lock when using random generators] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Generator and its derived classes are NOT thread-safe. Please note that most + * of the places where we have inserted locking for generators are historically + * based, and we haven't actually checked that everything is truly thread safe + * (and it probably isn't). Please use the public mutex_ when using any methods + * from these classes, except for the read-only methods. You can learn about the + * usage by looking into the unittests (aten/src/ATen/cpu_generator_test.cpp) + * and other places where we have used lock_guard. + * + * TODO: Look into changing the threading semantics of Generators in ATen (e.g., + * making them non-thread safe and instead making the generator state + * splittable, to accommodate forks into other threads). + */ -// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// The default seed is selected to be a large number +// with good distribution of 0s and 1s in bit representation +@Namespace("c10") @MemberGetter public static native @Cast("const uint64_t") long default_rng_seed_val(); +// Targeting ../GeneratorImpl.java -// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor +@Namespace("c10::detail") public static native @Cast("uint64_t") long getNonDeterministicRandom(@Cast("bool") boolean is_cuda/*=false*/); +@Namespace("c10::detail") public static native @Cast("uint64_t") long getNonDeterministicRandom(); -// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) + // namespace detail + // namespace c10 -// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor +// Parsed from ATen/core/Generator.h -// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor +// #include +// #include +// #include +// #include +// #include +// For the record I don't think this is a correct pimpl idiom. +// Including Impl header in interface header defeats the purpose +// because you can't change Impl private members without forcing +// everything that included the interface to rebuild. +// Impl should be forward-declared in the interface header instead. +// #include +// Targeting ../Generator.java -// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor +@Namespace("at") public static native @ByVal @Name("make_generator") Generator make_generator_cpu(); +@Namespace("at") public static native @ByVal @Name("make_generator") Generator make_generator_cpu(@Cast("uint64_t&&") long seed_in); +/** + * Utility function to static cast input Generator* to + * the backend generator type (CPU/CUDAGeneratorImpl etc.) + */ -// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) +/** + * Utility function used in tensor implementations, which + * supplies the default generator to tensors, if an input generator + * is not supplied. The input Generator* is also static casted to + * the backend generator type (CPU/CUDAGeneratorImpl etc.) + */ +/** + * Helper function for checking the validity of new random generator + * state. 
Right now the following conditions are checked: + * + * - The new state tensor must be a torch.ByteTensor + * - Data of the new state tensor must be contiguous + */ +@Namespace("at::detail") public static native void check_rng_state(@Const @ByRef TensorImpl new_state); -// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor + // namespace detail + // namespace at -// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) +// Parsed from ATen/core/symbol.h -// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor +// #pragma once +// #include +// #include +// #include // For std::hash +// #include +// 'prim' symbols are synthetic operators that occur only in the IR +// and don't have corresponding implementations in ATen. -// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// 'onnx' symbols correspond to ONNX operators. Their semantics +// are defined in https://github.com/onnx/onnx/blob/master/docs/Operators.md +// The particular version we are targeting is specified by '_onnx_opset_version' +// in torch.onnx.symbolic_helper +// +// In general, most ONNX operators won't get an entry here, because they +// are handled from the Python end. However, you may occasionally need +// to intern an ONNX symbol here so that you can conveniently write an +// optimization on ONNX operations. +// 'attr' symbols are attribute keys. They are shared between both ONNX and ATen +// operators (you disambiguate their meaning by looking at the operator itself). +// In general, you only need to define attribute keys that are used by +// onnx or prim; ATen attributes are automatically generated in FORALL_ATTR_BASE_SYMBOLS. -// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor +// Note [Symbol allocation] +// ~~~~~~~~~~~~~~~~~~~~~~~~ +// +// 1. Symbol namespace is split up into namespaces. +// +// 2. The intended access pattern for built-in symbols is onnx::MatMul +// in the c10 namespace (this is a Symbol). +// +// Built-in constant definition strategy: +// - Enum is the most convenient way to generate a contiguous sequence +// of numbers for an identifier. +// - However, an enum gives you a fresh type. We want onnx::MatMul to +// be type Symbol, not some random enum type! +// - Therefore, after using enums to generate the sequence of integers, +// we then declare constexpr Symbols to give everything the actual Symbol +// type we want. Symbols must be constexpr to be valid to be "case"ed on. -// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// Targeting ../Symbol.java + +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@ByVal Symbol lhs, @ByVal Symbol rhs); -// aten::dot(Tensor self, Tensor tensor) -> Tensor -// aten::vdot(Tensor self, Tensor other) -> Tensor -// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType?
dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + // namespace c10 -// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// make symbol behave like an integer in hash tables -// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// Parsed from ATen/core/Dimname.h -// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// #pragma once +// #include +// #include +// #include +// #include -// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public enum NameType { BASIC((byte)(0)), WILDCARD((byte)(1)); + public final byte value; + private NameType(byte v) { this.value = v; } + private NameType(NameType e) { this.value = e.value; } + public NameType intern() { for (NameType e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../Dimname.java -// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Dimname dimname); +@Namespace("at") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Dimname lhs, @Const @ByRef Dimname rhs); -// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef Dimname lhs, @Const @ByRef Dimname rhs); + // namespace at -// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// Parsed from ATen/core/NamedTensor.h -// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// #pragma once +// #include +// #include +// #include +// Targeting ../NamedTensorMeta.java -// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// Targeting ../NamesMode.java -// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// Targeting ../NoNamesGuard.java -// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +// Sets the names of `tensor` to be `names`. +@Namespace("at") public static native @Const @ByRef TensorBase internal_set_names_inplace(@Const @ByRef TensorBase tensor, @ByVal DimnameListOptional names); +@Namespace("at") public static native @Const @ByRef TensorBase internal_set_names_inplace(@Const @ByRef TensorBase tensor, @StdMove DimnameVector names, @Cast("bool") boolean validate_names); -// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") @MemberGetter public static native @Cast("const size_t") long kMaxNamedTensorDim(); -// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) +// Some helper functions on TensorImpl. Useful for working with names in TH. +// XXX: Ideally these would exist as methods on TensorImpl +@Namespace("at::impl") public static native void internal_set_names_inplace(TensorImpl impl, @ByVal DimnameListOptional names, @Cast("bool") boolean validate_names); +@Namespace("at::impl") public static native void internal_set_names_inplace(TensorImpl impl, @StdMove DimnameVector names, @Cast("bool") boolean validate_names); -// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) -// aten::erf(Tensor self) -> Tensor +// Returns true if the tensor's names exist and are not all 'None'. +// Returns false if the tensor's names don't exist (were not allocated), +// or if all names are 'None'. +// We treat not-allocated-names the same as allocated names that are all 'None'. +@Namespace("at::impl") public static native @Cast("bool") boolean has_names(@Const TensorImpl impl); +// Returns the names of the tensor's dimensions. +// Unnamed tensors are treated as having 'None' in all dimensions; this method +// would return a DimnameList of all 'None's for an unnamed tensor. +@Namespace("at::impl") public static native @ByVal DimnameArrayRef get_names(@Const TensorImpl impl); -// aten::erf_(Tensor(a!) self) -> Tensor(a!) +// This is more of an implementation detail; one should use impl::get_names / +// Tensor::names() whenever possible because it provides a cleaner API. +// Returns the names of the tensor if they have been allocated; returns nullopt +// instead if they haven't been. The names of a tensor are not allocated if a +// tensor is constructed with names=None. +@Namespace("at::impl") public static native @ByVal DimnameListOptional get_opt_names(@Const TensorImpl impl); + // namespace impl -// aten::erfc(Tensor self) -> Tensor + // namespace at -// aten::erfc_(Tensor(a!) self) -> Tensor(a!) +// Parsed from ATen/core/QuantizerBase.h +// #pragma once -// aten::exp(Tensor self) -> Tensor +// #include +// #include +// #include +// Targeting ../QTensorImpl.java -// aten::exp_(Tensor(a!) self) -> Tensor(a!) +// Targeting ../Quantizer.java -// aten::exp2(Tensor self) -> Tensor + // namespace at -// aten::exp2_(Tensor(a!) self) -> Tensor(a!) +// Parsed from ATen/core/TensorAccessor.h -// aten::expm1(Tensor self) -> Tensor +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::expm1_(Tensor(a!) self) -> Tensor(a!) +// The PtrTraits argument to the TensorAccessor/GenericPackedTensorAccessor +// is used to enable the __restrict__ keyword/modifier for the data +// passed to cuda.
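A hedged sketch of the named-tensor introspection helpers from the NamedTensor.h hunk above. has_names/get_names are declared there; the ones(long...) factory and Tensor.unsafeGetTensorImpl() are assumptions about the generated Tensor surface:

import org.bytedeco.pytorch.Tensor;
import org.bytedeco.pytorch.TensorImpl;
import static org.bytedeco.pytorch.global.torch.*;

public class NamedTensorSketch {
    public static void main(String[] args) {
        Tensor t = ones(2, 3);                      // assumed generated factory
        TensorImpl impl = t.unsafeGetTensorImpl();  // assumed accessor on Tensor
        if (!has_names(impl)) {
            // Unnamed tensors (or all-'None' names) report false here;
            // get_names(impl) would yield a DimnameArrayRef of 'None's.
            System.out.println("no dimension names set");
        }
    }
}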
+// #if defined(__CUDACC__) || defined(__HIPCC__) +// #endif -// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a) +// TensorAccessorBase and TensorAccessor are used for both CPU and CUDA tensors. +// For CUDA tensors they are used in device code (only). This means that we restrict ourselves +// to functions and types available there (e.g. IntArrayRef isn't). +// The PtrTraits argument is only relevant to cuda to support `__restrict__` pointers. -// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a) +// The `TensorAccessor` is typically instantiated for CPU `Tensor`s using +// `Tensor.accessor()`. +// For CUDA `Tensor`s, `GenericPackedTensorAccessor` is used on the host and only +// indexing on the device uses `TensorAccessor`s. -// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a) +// GenericPackedTensorAccessorBase and GenericPackedTensorAccessor are used for CUDA `Tensor`s on the host. +// In contrast to `TensorAccessor`s, they copy the strides and sizes on instantiation (on the host) +// in order to transfer them to the device when calling kernels. +// On the device, indexing of multidimensional tensors yields `TensorAccessor`s. +// Use RestrictPtrTraits as PtrTraits if you want the tensor's data pointer to be marked as __restrict__. +// Instantiation from data, sizes, strides is only needed on the host and std::copy isn't available +// on the device, so those functions are host only. -// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a) +// Can't put this directly into the macro function args because of commas +// #define AT_X GenericPackedTensorAccessor +// Old name for `GenericPackedTensorAccessor` +// #undef AT_X + // namespace at -// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a) +// Parsed from c10/util/ExclusivelyOwnedTensorTraits.h -// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a) +// #pragma once +// #include -// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) +// #include +// Shared ExclusivelyOwnedTraits implementation between caffe2::Tensor and +// at::TensorBase. + // namespace c10 -// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a) +// Parsed from ATen/core/TensorBase.h +// #pragma once -// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) + // namespace torch::autograd -// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) +// Convert Tensor to TensorBase without any need to include Tensor.h +@Namespace("at") public static native @Const @ByRef TensorBase get_tensor_base(@Const @ByRef Tensor t); +@Namespace("at::impl") public static native @Cast("bool") boolean variable_excluded_from_dispatch(); -// aten::floor(Tensor self) -> Tensor +// Targeting ../TensorBase.java -// aten::floor_(Tensor(a!) self) -> Tensor(a!) -// aten::floor_divide(Tensor self, Tensor other) -> Tensor -// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
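The get_tensor_base bridge bound above views a full Tensor through the lighter TensorBase interface; a minimal sketch (the ones factory and the dim()/numel() accessors on the generated TensorBase are assumptions, while get_tensor_base itself is declared in this hunk):

import org.bytedeco.pytorch.Tensor;
import org.bytedeco.pytorch.TensorBase;
import static org.bytedeco.pytorch.global.torch.*;

public class TensorBaseSketch {
    public static void main(String[] args) {
        Tensor t = ones(4, 5);                 // assumed generated factory
        TensorBase base = get_tensor_base(t);  // declared above
        // TensorBase carries the metadata queries without the full op surface:
        System.out.println(base.dim() + "-d tensor, numel = " + base.numel());
    }
}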
-// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
+// Helper creator for the Tensor class which doesn't require the user to pass
+// in an intrusive_ptr; instead it just converts the argument passed to the
+// requested intrusive_ptr type.
+ // namespace detail
-// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+@Namespace("at") public static native DispatchKey legacyExtractDispatchKey(@Const @ByRef TensorBase t);
-// aten::frac(Tensor self) -> Tensor
+// Targeting ../MaybeOwnedTraitsTensor.java
-// aten::frac_(Tensor(a!) self) -> Tensor(a!)
+ // namespace c10
-// aten::gcd(Tensor self, Tensor other) -> Tensor
-// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+ // namespace symint
-// aten::lcm(Tensor self, Tensor other) -> Tensor
+ // namespace at
-// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// Parsed from ATen/MethodOperators.h
+// #pragma once
-// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
+// @generated by torchgen/gen.py from MethodOperators.h
+// #ifdef TORCH_ASSERT_NO_OPERATORS
+// #error This change adds a dependency on native_functions.yaml,
+// meaning the file will need to be re-compiled every time an operator
+// is changed or added. Consider if your change would be better placed in
+// another file, or if a more specific header might achieve the same goal.
+// See NOTE: [Tensor vs. TensorBase]
+// #endif
-// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+// #include <...> [long run of #include directives; the bracketed header names were lost in extraction]
-// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
+ // namespace _ops
+ // namespace at
-// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
+// Parsed from ATen/core/TensorBody.h
+// #pragma once
-// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
+// #ifdef TORCH_ASSERT_NO_OPERATORS
+// #error This change adds a dependency on native_functions.yaml,
+// meaning the file will need to be re-compiled every time an operator
+// is changed or added. Consider if your change would be better placed in
+// another file, or if a more specific header might achieve the same goal.
+// See NOTE: [Tensor vs. TensorBase]
+// #endif
+// #include <...> [run of #include directives; header names lost in extraction]
-// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
+// #include <...> [header name lost in extraction]
-// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
+ // namespace at
+ // namespace indexing
+ // namespace at
+ // namespace torch::autograd
+// Targeting ../Tensor.java
-// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
+// Helper creator for the Tensor class which doesn't require the user to pass
+// in an intrusive_ptr; instead it just converts the argument passed to the
+// requested intrusive_ptr type.
-// aten::isnan(Tensor self) -> Tensor
+ // namespace detail
+ // namespace at
-// aten::is_distributed(Tensor self) -> bool
+// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
-// aten::is_floating_point(Tensor self) -> bool
+// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
-// aten::is_complex(Tensor self) -> bool
+// aten::data(Tensor self) -> Tensor
-// aten::is_conj(Tensor self) -> bool
+// aten::is_leaf(Tensor self) -> bool
-// aten::_is_zerotensor(Tensor self) -> bool
+// aten::output_nr(Tensor self) -> int
-// aten::is_neg(Tensor self) -> bool
+// aten::_version(Tensor self) -> int
-// aten::isreal(Tensor self) -> Tensor
+// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
-// aten::is_nonzero(Tensor self) -> bool
+// aten::retain_grad(Tensor(a!) self) -> ()
-// aten::is_same_size(Tensor self, Tensor other) -> bool
+// aten::retains_grad(Tensor self) -> bool
-// aten::is_signed(Tensor self) -> bool
+// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
-// aten::is_inference(Tensor self) -> bool
+// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
-// aten::kron(Tensor self, Tensor other) -> Tensor
+// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
-// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
-// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
-// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
+// aten::align_as(Tensor self, Tensor other) -> Tensor
-// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
+// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
-// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::abs(Tensor self) -> Tensor
-// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::abs_(Tensor(a!) self) -> Tensor(a!)
-// aten::log(Tensor self) -> Tensor
+// aten::absolute(Tensor self) -> Tensor
-// aten::log_(Tensor(a!) self) -> Tensor(a!)
+// aten::absolute_(Tensor(a!) self) -> Tensor(a!)
-// aten::log10(Tensor self) -> Tensor
+// aten::angle(Tensor self) -> Tensor
-// aten::log10_(Tensor(a!) self) -> Tensor(a!)
+// aten::sgn(Tensor self) -> Tensor
-// aten::log1p(Tensor self) -> Tensor
+// aten::sgn_(Tensor(a!) self) -> Tensor(a!)
-// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
+// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
-// aten::log2(Tensor self) -> Tensor
+// aten::_conj(Tensor(a) self) -> Tensor(a)
-// aten::log2_(Tensor(a!) self) -> Tensor(a!)
+// aten::conj(Tensor(a) self) -> Tensor(a)
-// aten::logaddexp(Tensor self, Tensor other) -> Tensor
+// aten::_conj_physical(Tensor self) -> Tensor
-// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
+// aten::conj_physical(Tensor self) -> Tensor
-// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
-// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
+// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
-// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::resolve_neg(Tensor(a) self) -> Tensor(a)
-// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::_neg_view(Tensor(a) self) -> Tensor(a)
-// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+// aten::acos(Tensor self) -> Tensor
-// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+// aten::acos_(Tensor(a!) self) -> Tensor(a!)
-// aten::logcumsumexp(Tensor self, int dim) -> Tensor
+// aten::arccos(Tensor self) -> Tensor
-// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
+// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
-// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
-// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
+// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
-// aten::matmul(Tensor self, Tensor other) -> Tensor
+// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
-// aten::matrix_power(Tensor self, int n) -> Tensor
+// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
-// aten::matrix_exp(Tensor self) -> Tensor
+// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
-// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
+// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
-// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
-// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
-// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
+// aten::_is_all_true(Tensor self) -> Tensor
-// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
+// aten::_is_any_true(Tensor self) -> Tensor
-// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
-// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
-// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
-// aten::median(Tensor self) -> Tensor
+// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
-// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
-// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
-// aten::nanmedian(Tensor self) -> Tensor
+// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
-// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::acosh(Tensor self) -> Tensor
-// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::acosh_(Tensor(a!) self) -> Tensor(a!)
-// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::arccosh(Tensor self) -> Tensor
-// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
-// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
+// aten::asinh(Tensor self) -> Tensor
-// aten::mm(Tensor self, Tensor mat2) -> Tensor
+// aten::asinh_(Tensor(a!) self) -> Tensor(a!)
-// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::arcsinh(Tensor self) -> Tensor
-// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)
-// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::atanh(Tensor self) -> Tensor
-// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::atanh_(Tensor(a!) self) -> Tensor(a!)
-// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::arctanh(Tensor self) -> Tensor
-// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::arctanh_(Tensor(a!) self) -> Tensor(a!)
-// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
-// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
-// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
-// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
-// aten::mv(Tensor self, Tensor vec) -> Tensor
+// aten::asin(Tensor self) -> Tensor
-// aten::mvlgamma(Tensor self, int p) -> Tensor
+// aten::asin_(Tensor(a!) self) -> Tensor(a!)
-// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
+// aten::arcsin(Tensor self) -> Tensor
-// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
+// aten::arcsin_(Tensor(a!) self) -> Tensor(a!)
-// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
+// aten::atan(Tensor self) -> Tensor
-// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
+// aten::atan_(Tensor(a!) self) -> Tensor(a!)
-// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
+// aten::arctan(Tensor self) -> Tensor
-// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
+// aten::arctan_(Tensor(a!) self) -> Tensor(a!)
-// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
+// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
-// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
+// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
-// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
+// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
-// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
+// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
-// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
+// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
-// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
+// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
-// aten::numpy_T(Tensor(a) self) -> Tensor(a)
+// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
-// aten::matrix_H(Tensor(a) self) -> Tensor(a)
+// aten::bitwise_not(Tensor self) -> Tensor
-// aten::mT(Tensor(a) self) -> Tensor(a)
+// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)
-// aten::mH(Tensor(a) self) -> Tensor(a)
+// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
-// aten::adjoint(Tensor(a) self) -> Tensor(a)
+// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::is_pinned(Tensor self, Device? device=None) -> bool
+// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
-// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
+// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
-// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
+// aten::logical_not(Tensor self) -> Tensor
-// aten::rad2deg(Tensor self) -> Tensor
+// aten::logical_not_(Tensor(a!) self) -> Tensor(a!)
-// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
+// aten::logical_xor(Tensor self, Tensor other) -> Tensor
-// aten::deg2rad(Tensor self) -> Tensor
+// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
+// aten::logical_and(Tensor self, Tensor other) -> Tensor
-// aten::ravel(Tensor(a) self) -> Tensor(a)
+// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::reciprocal(Tensor self) -> Tensor
+// aten::logical_or(Tensor self, Tensor other) -> Tensor
-// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
+// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::neg(Tensor self) -> Tensor
+// aten::bmm(Tensor self, Tensor mat2) -> Tensor
-// aten::neg_(Tensor(a!) self) -> Tensor(a!)
+// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
-// aten::negative(Tensor self) -> Tensor
+// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
-// aten::negative_(Tensor(a!) self) -> Tensor(a!)
+// aten::ceil(Tensor self) -> Tensor
-// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
+// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
-// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
+// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
-// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
+// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
-// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
+// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
-// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
+// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
-// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
+// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
-// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
+// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]
-// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
+// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
-// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
+// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
-// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
+// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
-// aten::round(Tensor self) -> Tensor
+// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
-// aten::round_(Tensor(a!) self) -> Tensor(a!)
+// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
-// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
+// aten::clamp_max(Tensor self, Scalar max) -> Tensor
-// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
+// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
-// aten::relu(Tensor self) -> Tensor
+// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
-// aten::relu_(Tensor(a!) self) -> Tensor(a!)
+// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
-// aten::prelu(Tensor self, Tensor weight) -> Tensor
+// aten::clamp_min(Tensor self, Scalar min) -> Tensor
-// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
+// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor
-// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
+// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)
-// aten::rsqrt(Tensor self) -> Tensor
+// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)
-// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
+// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
-// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
+// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
-// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
+// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
-// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
+// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
-// aten::sigmoid(Tensor self) -> Tensor
+// aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
-// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
+// aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)
-// aten::logit(Tensor self, float? eps=None) -> Tensor
+// aten::cos(Tensor self) -> Tensor
-// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
+// aten::cos_(Tensor(a!) self) -> Tensor(a!)
-// aten::sin(Tensor self) -> Tensor
+// aten::cosh(Tensor self) -> Tensor
-// aten::sin_(Tensor(a!) self) -> Tensor(a!)
+// aten::cosh_(Tensor(a!) self) -> Tensor(a!)
-// aten::sinc(Tensor self) -> Tensor
+// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor
-// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
+// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor
-// aten::sinh(Tensor self) -> Tensor
+// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor
-// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
+// aten::corrcoef(Tensor self) -> Tensor
-// aten::detach(Tensor(a) self) -> Tensor(a)
+// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
-// aten::detach_(Tensor(a!) self) -> Tensor(a!)
+// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
-// aten::size.Dimname(Tensor self, Dimname dim) -> int
+// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
-// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
+// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)
-// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
+// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
-// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
+// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
-// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
+// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
-// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
+// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
-// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
+// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
-// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
+// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
-// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
-// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
-// aten::smm(Tensor self, Tensor mat2) -> Tensor
+// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
-// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+// aten::diagflat(Tensor self, int offset=0) -> Tensor
-// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
-// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
+// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
-// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
+// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
-// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
+// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
-// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]
+// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
-// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
+// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]
+// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
-// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
+// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
-// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
+// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
-// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
+// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
-// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]
+// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
-// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
+// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
-// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
+// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
-// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
+// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
+// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
-// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
+// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
-// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
+// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
-// aten::squeeze(Tensor(a) self) -> Tensor(a)
+// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
-// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
+// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
-// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
+// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
-// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)
+// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
-// aten::squeeze_(Tensor(a!) self) -> Tensor(a!)
+// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
+// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
-// aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)
+// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
-// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
+// aten::dot(Tensor self, Tensor tensor) -> Tensor
-// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+// aten::vdot(Tensor self, Tensor other) -> Tensor
-// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
+// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
+// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
+// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::stride.Dimname(Tensor self, Dimname dim) -> int
+// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
+// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::sum_to_size(Tensor self, int[] size) -> Tensor
+// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::sqrt(Tensor self) -> Tensor
+// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
+// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::square(Tensor self) -> Tensor
+// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::square_(Tensor(a!) self) -> Tensor(a!)
+// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::std(Tensor self, bool unbiased=True) -> Tensor
+// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
+// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
+// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
+// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
+// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
-// aten::t(Tensor(a) self) -> Tensor(a)
+// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
-// aten::t_(Tensor(a!) self) -> Tensor(a!)
+// aten::erf(Tensor self) -> Tensor
-// aten::tan(Tensor self) -> Tensor
+// aten::erf_(Tensor(a!) self) -> Tensor(a!)
-// aten::tan_(Tensor(a!) self) -> Tensor(a!)
+// aten::erfc(Tensor self) -> Tensor
-// aten::tanh(Tensor self) -> Tensor
+// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
-// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
+// aten::exp(Tensor self) -> Tensor
-// aten::tile(Tensor self, int[] dims) -> Tensor
+// aten::exp_(Tensor(a!) self) -> Tensor(a!)
-// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
+// aten::exp2(Tensor self) -> Tensor
-// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
+// aten::exp2_(Tensor(a!) self) -> Tensor(a!)
-// aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
+// aten::expm1(Tensor self) -> Tensor
-// aten::flip(Tensor self, int[] dims) -> Tensor
+// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
-// aten::fliplr(Tensor self) -> Tensor
+// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
-// aten::flipud(Tensor self) -> Tensor
+// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
-// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
+// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
-// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
+// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
-// aten::_nested_tensor_size(Tensor self) -> Tensor
+// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
-// aten::_nested_tensor_strides(Tensor self) -> Tensor
+// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
-// aten::_nested_tensor_offsets(Tensor self) -> int[]
+// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
-// aten::trunc(Tensor self) -> Tensor
+// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
-// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
+// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
-// aten::fix(Tensor self) -> Tensor
+// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
-// aten::fix_(Tensor(a!) self) -> Tensor(a!)
+// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
-// aten::type_as(Tensor self, Tensor other) -> Tensor
+// aten::floor(Tensor self) -> Tensor
-// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
+// aten::floor_(Tensor(a!) self) -> Tensor(a!)
-// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
+// aten::floor_divide(Tensor self, Tensor other) -> Tensor
-// aten::var(Tensor self, bool unbiased=True) -> Tensor
+// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
+// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
-// aten::var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
+// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
-// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+// aten::frac(Tensor self) -> Tensor
-// aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor
+// aten::frac_(Tensor(a!) self) -> Tensor(a!)
-// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
+// aten::gcd(Tensor self, Tensor other) -> Tensor
-// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
+// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
+// aten::lcm(Tensor self, Tensor other) -> Tensor
-// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
+// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
+// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
-// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
-// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
+// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
-// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
-// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
+// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
-// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
+// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
-// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
+// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
-// aten::positive(Tensor(a) self) -> Tensor(a)
+// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
-// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+// aten::isnan(Tensor self) -> Tensor
-// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
+// aten::is_distributed(Tensor self) -> bool
-// aten::zero_(Tensor(a!) self) -> Tensor(a!)
+// aten::is_floating_point(Tensor self) -> bool
-// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+// aten::is_complex(Tensor self) -> bool
-// aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+// aten::is_conj(Tensor self) -> bool
-// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+// aten::_is_zerotensor(Tensor self) -> bool
-// aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+// aten::is_neg(Tensor self) -> bool
-// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+// aten::isreal(Tensor self) -> Tensor
-// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+// aten::is_nonzero(Tensor self) -> bool
-// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+// aten::is_same_size(Tensor self, Tensor other) -> bool
-// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+// aten::is_signed(Tensor self) -> bool
-// aten::heaviside(Tensor self, Tensor values) -> Tensor
+// aten::is_inference(Tensor self) -> bool
-// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
+// aten::kron(Tensor self, Tensor other) -> Tensor
-// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
+// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
+// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
-// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
+// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
-// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
+// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
-// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
+// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
+// aten::log(Tensor self) -> Tensor
-// aten::_to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
+// aten::log_(Tensor(a!) self) -> Tensor(a!)
-// aten::sparse_dim(Tensor self) -> int
+// aten::log10(Tensor self) -> Tensor
-// aten::_dimI(Tensor self) -> int
+// aten::log10_(Tensor(a!) self) -> Tensor(a!)
-// aten::dense_dim(Tensor self) -> int
+// aten::log1p(Tensor self) -> Tensor
-// aten::_dimV(Tensor self) -> int
+// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
-// aten::_nnz(Tensor self) -> int
+// aten::log2(Tensor self) -> Tensor
-// aten::coalesce(Tensor(a) self) -> Tensor(a)
+// aten::log2_(Tensor(a!) self) -> Tensor(a!)
-// aten::is_coalesced(Tensor self) -> bool
+// aten::logaddexp(Tensor self, Tensor other) -> Tensor
-// aten::_indices(Tensor(a) self) -> Tensor(a)
+// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
-// aten::_values(Tensor(a) self) -> Tensor(a)
+// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
-// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
+// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
-// aten::indices(Tensor(a) self) -> Tensor(a)
+// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::values(Tensor(a) self) -> Tensor(a)
+// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
-// aten::crow_indices(Tensor(a) self) -> Tensor(a)
+// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
-// aten::col_indices(Tensor(a) self) -> Tensor(a)
+// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
-// aten::ccol_indices(Tensor(a) self) -> Tensor(a)
+// aten::logcumsumexp(Tensor self, int dim) -> Tensor
-// aten::row_indices(Tensor(a) self) -> Tensor(a)
+// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
-// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
+// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
-// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
+// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
-// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
+// aten::matmul(Tensor self, Tensor other) -> Tensor
-// aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor
+// aten::matrix_power(Tensor self, int n) -> Tensor
-// aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor
+// aten::matrix_exp(Tensor self) -> Tensor
-// aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor
+// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
-// aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
+// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor
+// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
+// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
-// aten::dequantize.self(Tensor self) -> Tensor
+// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
-// aten::q_scale(Tensor self) -> float
+// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
-// aten::q_zero_point(Tensor self) -> int
+// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
-// aten::q_per_channel_scales(Tensor self) -> Tensor
+// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
-// aten::q_per_channel_zero_points(Tensor self) -> Tensor
+// aten::median(Tensor self) -> Tensor
-// aten::q_per_channel_axis(Tensor self) -> int
+// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::int_repr(Tensor self) -> Tensor
+// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::qscheme(Tensor self) -> QScheme
+// aten::nanmedian(Tensor self) -> Tensor
-// aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
+// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
+// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
+// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
+// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
+// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
-// aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
+// aten::mm(Tensor self, Tensor mat2) -> Tensor
-// aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
+// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::item(Tensor self) -> Scalar
+// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
+// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
-// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
+// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
+// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
-// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
+// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
-// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
+// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
-// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
+// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// aten::set_(Tensor(a!) self) -> Tensor(a!)
+// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
-// aten::is_set_to(Tensor self, Tensor tensor) -> bool
+// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
-// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
+// aten::mv(Tensor self, Tensor vec) -> Tensor
-// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
+// aten::mvlgamma(Tensor self, int p) -> Tensor
-// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
+// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
-// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
+// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
-// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
+// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
-// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
+// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
-// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
+// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)
-// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
+// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
-// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
+// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)
-// aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
+// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
-// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
+// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
-// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
+// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
-// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
+// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
-// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
+// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
-// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
+// aten::numpy_T(Tensor(a) self) -> Tensor(a)
-// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
+// aten::matrix_H(Tensor(a) self) -> Tensor(a)
-// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
+// aten::mT(Tensor(a) self) -> Tensor(a)
-// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
+// aten::mH(Tensor(a) self) -> Tensor(a)
-// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
+// aten::adjoint(Tensor(a) self) -> Tensor(a)
-// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
+// aten::is_pinned(Tensor self, Device? device=None) -> bool
-// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
+// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
-// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
+// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
-// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
+// aten::rad2deg(Tensor self) -> Tensor
-// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
+// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
-// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
+// aten::deg2rad(Tensor self) -> Tensor
-// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
+// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
-// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
+// aten::ravel(Tensor(a) self) -> Tensor(a)
-// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
+// aten::reciprocal(Tensor self) -> Tensor
-// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
+// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
-// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
+// aten::neg(Tensor self) -> Tensor
-// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
+// aten::neg_(Tensor(a!) self) -> Tensor(a!)
-// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
+// aten::negative(Tensor self) -> Tensor
-// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
+// aten::negative_(Tensor(a!) self) -> Tensor(a!)
-// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
+// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
-// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
+// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
-// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
+// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
-// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
+// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
-// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
+// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor
-// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
+// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
-// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
-// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
-// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
-// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
-// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::round(Tensor self) -> Tensor
-// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::round_(Tensor(a!) self) -> Tensor(a!)
-// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
-// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
-// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::relu(Tensor self) -> Tensor
-// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::relu_(Tensor(a!) self) -> Tensor(a!)
-// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::prelu(Tensor self, Tensor weight) -> Tensor
-// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
-// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
-// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::rsqrt(Tensor self) -> Tensor
-// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
-// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
-// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
-// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)
-// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::sigmoid(Tensor self) -> Tensor
-// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
-// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::logit(Tensor self, float? eps=None) -> Tensor
-// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
-// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::sin(Tensor self) -> Tensor
-// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::sin_(Tensor(a!) self) -> Tensor(a!)
-// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::sinc(Tensor self) -> Tensor
-// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
-// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::sinh(Tensor self) -> Tensor
-// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
-// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::detach(Tensor(a) self) -> Tensor(a)
-// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::detach_(Tensor(a!) self) -> Tensor(a!)
-// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::size.Dimname(Tensor self, Dimname dim) -> int
-// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
-// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
+// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
-// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
-// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
-// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor
-// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
self, Scalar other) -> Tensor(a!) +// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor -// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor -// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor +// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor -// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor -// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor +// aten::smm(Tensor self, Tensor mat2) -> Tensor -// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor -// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) +// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor -// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) +// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] -// aten::digamma_(Tensor(a!) self) -> Tensor(a!) +// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] -// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) +// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] -// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) +// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] -// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) +// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] -// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] -// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!) +// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] -// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) +// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] -// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) +// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] -// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) +// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] -// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) +// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] -// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) +// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] -// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) +// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] -// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) 
+// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] -// aten::diag(Tensor self, int diagonal=0) -> Tensor +// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] -// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor +// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] -// aten::triu(Tensor self, int diagonal=0) -> Tensor +// aten::squeeze(Tensor(a) self) -> Tensor(a) -// aten::tril(Tensor self, int diagonal=0) -> Tensor +// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) -// aten::trace(Tensor self) -> Tensor +// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) -// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor +// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a) -// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor +// aten::squeeze_(Tensor(a!) self) -> Tensor(a!) -// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!) -// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!) -// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor +// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!) -// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor +// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor -// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor -// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor +// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor -// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor +// aten::stride.Dimname(Tensor self, Dimname dim) -> int -// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor +// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor -// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor +// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor +// aten::sum_to_size(Tensor self, int[] size) -> Tensor -// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor +// aten::sqrt(Tensor self) -> Tensor -// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::sqrt_(Tensor(a!) self) -> Tensor(a!) -// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
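The `aten::` schema comments in this generated file double as API documentation: each signature corresponds to a method on the generated Tensor class (or a static factory on the global class). A minimal illustrative sketch, not part of the generated diff, assuming a varargs ones(long...) factory overload and the usual method mappings:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class SchemaDemo {
    public static void main(String[] args) {
        Tensor t = ones(1, 2, 3);    // assumed varargs factory overload
        Tensor s = t.squeeze();      // aten::squeeze(Tensor(a) self): drops the size-1 dim
        Tensor r = s.sqrt();         // aten::sqrt(Tensor self), elementwise
        System.out.println(r.dim()); // 2 after squeezing [1, 2, 3] down to [2, 3]
    }
}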
+// aten::square(Tensor self) -> Tensor -// aten::le.Scalar(Tensor self, Scalar other) -> Tensor +// aten::square_(Tensor(a!) self) -> Tensor(a!) -// aten::le.Tensor(Tensor self, Tensor other) -> Tensor +// aten::std(Tensor self, bool unbiased=True) -> Tensor -// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor -// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor -// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor +// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor -// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor +// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor -// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor -// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor +// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor +// aten::t(Tensor(a) self) -> Tensor(a) -// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::t_(Tensor(a!) self) -> Tensor(a!) -// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::tan(Tensor self) -> Tensor -// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor +// aten::tan_(Tensor(a!) self) -> Tensor(a!) -// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor +// aten::tanh(Tensor self) -> Tensor -// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::tanh_(Tensor(a!) self) -> Tensor(a!) -// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::tile(Tensor self, int[] dims) -> Tensor -// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor +// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) -// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor +// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a) -// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) -// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::flip(Tensor self, int[] dims) -> Tensor -// aten::less.Scalar(Tensor self, Scalar other) -> Tensor +// aten::fliplr(Tensor self) -> Tensor -// aten::less.Tensor(Tensor self, Tensor other) -> Tensor +// aten::flipud(Tensor self) -> Tensor -// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor -// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor -// aten::take(Tensor self, Tensor index) -> Tensor +// aten::_nested_tensor_size(Tensor self) -> Tensor -// aten::take_along_dim(Tensor self, Tensor indices, int? 
dim=None) -> Tensor +// aten::_nested_tensor_strides(Tensor self) -> Tensor -// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor +// aten::_nested_tensor_offsets(Tensor self) -> int[] -// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor +// aten::trunc(Tensor self) -> Tensor -// aten::masked_select(Tensor self, Tensor mask) -> Tensor +// aten::trunc_(Tensor(a!) self) -> Tensor(a!) -// aten::nonzero(Tensor self) -> Tensor +// aten::fix(Tensor self) -> Tensor -// aten::nonzero_numpy(Tensor self) -> Tensor[] +// aten::fix_(Tensor(a!) self) -> Tensor(a!) -// aten::argwhere(Tensor self) -> Tensor +// aten::type_as(Tensor self, Tensor other) -> Tensor -// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor +// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a) -// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor +// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!) -// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor +// aten::var(Tensor self, bool unbiased=True) -> Tensor -// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) +// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor -// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor +// aten::var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor -// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) +// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor -// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) +// aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor -// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) +// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a) -// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) +// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor -// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!) +// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor -// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) +// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor -// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) +// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor -// aten::cholesky(Tensor self, bool upper=False) -> Tensor +// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor -// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor +// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor -// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor +// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor -// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) +// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? 
p, Dimname[1] dim, bool keepdim=False) -> Tensor -// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau) +// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent) -// aten::orgqr(Tensor self, Tensor input2) -> Tensor +// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor -// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor +// aten::positive(Tensor(a) self) -> Tensor(a) -// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor +// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!) -// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor +// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!) -// aten::lgamma_(Tensor(a!) self) -> Tensor(a!) +// aten::zero_(Tensor(a!) self) -> Tensor(a!) -// aten::lgamma(Tensor self) -> Tensor +// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor -// aten::digamma(Tensor self) -> Tensor +// aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) -// aten::polygamma(int n, Tensor self) -> Tensor +// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor -// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!) +// aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) -// aten::erfinv(Tensor self) -> Tensor +// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor -// aten::erfinv_(Tensor(a!) self) -> Tensor(a!) +// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) -// aten::i0(Tensor self) -> Tensor +// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor -// aten::i0_(Tensor(a!) self) -> Tensor(a!) +// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) -// aten::sign(Tensor self) -> Tensor +// aten::heaviside(Tensor self, Tensor values) -> Tensor -// aten::sign_(Tensor(a!) self) -> Tensor(a!) +// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!) -// aten::signbit(Tensor self) -> Tensor +// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor +// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) -// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor -// aten::atan2(Tensor self, Tensor other) -> Tensor +// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) -// aten::arctan2(Tensor self, Tensor other) -> Tensor +// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) -// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor -// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor +// aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor -// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor +// aten::_to_dense(Tensor self, ScalarType? 
dtype=None) -> Tensor -// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor +// aten::sparse_dim(Tensor self) -> int -// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +// aten::_dimI(Tensor self) -> int -// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +// aten::dense_dim(Tensor self) -> int -// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor +// aten::_dimV(Tensor self) -> int -// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::_nnz(Tensor self) -> int -// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor +// aten::coalesce(Tensor(a) self) -> Tensor(a) -// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::is_coalesced(Tensor self) -> bool -// aten::hypot(Tensor self, Tensor other) -> Tensor +// aten::_indices(Tensor(a) self) -> Tensor(a) -// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::_values(Tensor(a) self) -> Tensor(a) -// aten::igamma(Tensor self, Tensor other) -> Tensor +// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) -// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::indices(Tensor(a) self) -> Tensor(a) -// aten::igammac(Tensor self, Tensor other) -> Tensor +// aten::values(Tensor(a) self) -> Tensor(a) -// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::crow_indices(Tensor(a) self) -> Tensor(a) -// aten::nextafter(Tensor self, Tensor other) -> Tensor +// aten::col_indices(Tensor(a) self) -> Tensor(a) -// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::ccol_indices(Tensor(a) self) -> Tensor(a) -// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor +// aten::row_indices(Tensor(a) self) -> Tensor(a) -// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] -// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor +// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] -// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor -// aten::min(Tensor self) -> Tensor +// aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor -// aten::fmin(Tensor self, Tensor other) -> Tensor +// aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor -// aten::max(Tensor self) -> Tensor +// aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor -// aten::fmax(Tensor self, Tensor other) -> Tensor +// aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor -// aten::maximum(Tensor self, Tensor other) -> Tensor +// aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor -// aten::max.other(Tensor self, Tensor other) -> Tensor +// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor -// aten::minimum(Tensor self, Tensor other) -> Tensor +// aten::dequantize.self(Tensor self) -> Tensor -// aten::min.other(Tensor self, Tensor other) -> Tensor +// aten::q_scale(Tensor self) -> float -// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +// aten::q_zero_point(Tensor self) -> int -// aten::quantile.scalar(Tensor self, float q, int? 
dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +// aten::q_per_channel_scales(Tensor self) -> Tensor -// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +// aten::q_per_channel_zero_points(Tensor self) -> Tensor -// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +// aten::q_per_channel_axis(Tensor self) -> int -// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) +// aten::int_repr(Tensor self) -> Tensor -// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) +// aten::qscheme(Tensor self) -> QScheme -// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) +// aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a) -// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) +// aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a) -// aten::msort(Tensor self) -> Tensor +// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) -// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor +// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) -// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor +// aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) -// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor +// aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) -// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) +// aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) -// aten::all(Tensor self) -> Tensor +// aten::item(Tensor self) -> Scalar -// aten::any(Tensor self) -> Tensor +// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!) -// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor +// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) -// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) +// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) -// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) +// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) 
-// aten::equal(Tensor self, Tensor other) -> bool +// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) -// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) -// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor +// aten::set_(Tensor(a!) self) -> Tensor(a!) -// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) +// aten::is_set_to(Tensor self, Tensor tensor) -> bool -// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) +// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!) -// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor -// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor +// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) -// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) +// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor -// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) +// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) -// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) +// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor -// aten::alias(Tensor(a) self) -> Tensor(a) +// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a) -// aten::isfinite(Tensor self) -> Tensor +// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a) -// aten::isinf(Tensor self) -> Tensor +// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a) -// aten::record_stream(Tensor(a!) self, Stream s) -> () +// aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!) -// aten::isposinf(Tensor self) -> Tensor +// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor -// aten::isneginf(Tensor self) -> Tensor +// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!) -// aten::det(Tensor self) -> Tensor +// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor -// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) +// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor -// aten::logdet(Tensor self) -> Tensor +// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!) -// aten::inverse(Tensor self) -> Tensor +// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor -// aten::inner(Tensor self, Tensor other) -> Tensor +// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) -// aten::outer(Tensor self, Tensor vec2) -> Tensor +// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor -// aten::ger(Tensor self, Tensor vec2) -> Tensor +// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) -// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? 
output_size=None) -> Tensor +// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor -// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor +// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) - // namespace at - // namespace c10 +// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!) +// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor - // namespace at +// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor -// Parsed from ATen/core/Tensor.h -// #pragma once +// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor -// #include -// #include -// Targeting ../OptionalTensorRef.java +// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) +// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor +// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) - // namespace at +// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor -// Parsed from ATen/core/Formatting.h +// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!) -// #pragma once -// #include -// #include +// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor -// #include -// #include +// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!) -@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef Scalar s); +// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor -@Namespace("at") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef DeprecatedTypeProperties t); -@Namespace("at") public static native @Cast("std::ostream*") @ByRef Pointer print( - @Cast("std::ostream*") @ByRef Pointer stream, - @Const @ByRef Tensor tensor, - @Cast("int64_t") long linesize); -@Namespace("at") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Tensor t); -@Namespace("at") public static native void print(@Const @ByRef Tensor t, @Cast("int64_t") long linesize/*=80*/); -@Namespace("at") public static native void print(@Const @ByRef Tensor t); +// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor -// Parsed from ATen/core/UnsafeFromTH.h +// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor -// #pragma once -// #include -@Namespace("at") public static native @ByVal Tensor unsafeTensorFromTH(Pointer th_pointer, @Cast("bool") boolean retain); +// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) 
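The Formatting.h bindings above expose free-function print overloads for dumping a tensor, with an int64_t linesize parameter that defaults to 80. A hedged usage sketch (the exact owning class of these natives may change with this refactor; released versions place them on org.bytedeco.pytorch.global.torch, and the eye(long) factory overload is an assumption):

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class PrintDemo {
    public static void main(String[] args) {
        Tensor t = eye(3);  // assumed eye(long) overload: 3x3 identity
        print(t, 120);      // print(Tensor, int64_t linesize), declared above
        print(t);           // same, with the default linesize of 80
    }
}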
-@Namespace("at") public static native @Cast({"", "c10::Storage&&"}) @StdMove Storage unsafeStorageFromTH(Pointer th_pointer, @Cast("bool") boolean retain); +// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor +// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor -// Parsed from ATen/core/Variadic.h -// #pragma once +// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!) -// #include -// #include -// #include -// #include -// #include -// #include +// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// This class allows you to write variadic functions which -// call a (possibly overloaded) function on each argument, -// in order. This is most commonly used in autogenerated code, -// where it is convenient to have a function that can uniformly -// take arguments of different types. If your arguments -// are homogenous consider using a std::initializer_list instead. -// -// For examples of this in use, see torch/csrc/utils/variadic.h - // namespace torch +// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Parsed from ATen/core/blob.h +// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor -// #pragma once -// #include -// #include -// #include -// #include -// #include +// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor -// #include -// #include -// #include -// Targeting ../Blob.java +// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -@Namespace("caffe2") public static native void swap(@ByRef Blob lhs, @ByRef Blob rhs); +// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -@Namespace("caffe2") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Blob v); - // namespace caffe2 +// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor -// Parsed from ATen/core/class_type.h +// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor -// #pragma once -// #include +// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// #include -// #include -// #include - // namespace jit - // namespace torch -// This enumerator represents the 'kind' of an attribute - a buffer, a parameter, or neither. -// This state is mutually exclusive. Buffers and Parameters can only appear on modules. -@Namespace("c10") public enum AttributeKind { - BUFFER(0), - PARAMETER(1), - REGULAR_ATTRIBUTE(2); +// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) - public final int value; - private AttributeKind(int v) { this.value = v; } - private AttributeKind(AttributeKind e) { this.value = e.value; } - public AttributeKind intern() { for (AttributeKind e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -// Targeting ../ClassAttribute.java +// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor -/** - * User Defined Types - */ -// Targeting ../ClassType.java +// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor +// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
-// Parsed from ATen/core/enum_tag.h +// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor -// #pragma once -// @generated by torchgen/gen.py from enum_tag.h - // Enum of valid tags obtained from the entries in tags.yaml - @Namespace("at") public enum Tag { - core(0), - data_dependent_output(1), - dynamic_output_shape(2), - generated(3), - inplace_view(4), - nondeterministic_bitwise(5), - nondeterministic_seeded(6), - pointwise(7), - view_copy(8); +// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor - public final int value; - private Tag(int v) { this.value = v; } - private Tag(Tag e) { this.value = e.value; } - public Tag intern() { for (Tag e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } - } +// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Parsed from ATen/core/enum_type.h +// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// #pragma once -// #include +// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor -// #include -// Targeting ../EnumType.java +// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor - // namespace c10 +// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Parsed from ATen/core/type_ptr.h +// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// #pragma once -// #include -// #include +// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor -// #include -// #include -// Targeting ../SingletonTypePtr.java +// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor -// Targeting ../AnyTypePtr.java +// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Targeting ../AnyEnumTypePtr.java +// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../NumberTypePtr.java +// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor -// Targeting ../FloatTypePtr.java +// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor -// Targeting ../ComplexTypePtr.java +// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Targeting ../IntTypePtr.java +// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../BoolTypePtr.java +// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor -// Targeting ../StringTypePtr.java +// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../StorageTypePtr.java +// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor -// Targeting ../NoneTypePtr.java +// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Targeting ../GeneratorTypePtr.java +// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor -// Targeting ../QuantizerTypePtr.java +// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor -// Targeting ../QSchemeTypePtr.java +// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Targeting ../DeviceObjTypePtr.java +// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../StreamObjTypePtr.java +// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor -// Targeting ../CapsuleTypePtr.java +// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
-// Targeting ../PyObjectTypePtr.java +// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor -// Targeting ../LayoutTypePtr.java +// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Targeting ../ScalarTypeTypePtr.java +// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) -// Targeting ../AnyListTypePtr.java +// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) -// Targeting ../AnyTupleTypePtr.java +// aten::digamma_(Tensor(a!) self) -> Tensor(a!) -// Targeting ../AnyClassTypePtr.java +// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) +// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) +// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) - // namespace c10 +// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -// Parsed from ATen/core/functional.h +// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!) -// #pragma once -// #include -// #include +// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) -// The passed in function must take T by value (T), or by -// const reference (const T&); taking T by non-const reference -// will result in an error like: -// -// error: no type named 'type' in 'class std::result_of' -// -// No explicit template parameters are required. -// Overload for explicit function and ArrayRef +// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) -// C++ forbids taking an address of a constructor, so here's a workaround... -// Overload for constructor (R) application - // namespace c10 +// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) -// Parsed from ATen/core/ivalue.h +// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../CustomClassHolder.java +// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) - // namespace jit - // namespace torch -// Targeting ../RRefInterface.java +// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) +// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) -@Namespace("c10") public static native @Cast("bool") boolean _fastEqualsForContainer(@Const @ByRef IValue lhs, @Const @ByRef IValue rhs); -@Namespace("c10") public static native Function checkObjectSortSchema( - @Const @SharedPtr @ByRef ClassType t, - @Cast("std::stringstream*") @ByRef Pointer why_not); +// aten::diag(Tensor self, int diagonal=0) -> Tensor -// A comparator that checks ordering of two IValues of same type. -@Namespace("c10") public static native @ByVal @Cast("c10::IValueComparator*") Pointer getLessThanComparator(@Const @ByRef IValue v); -@Namespace("c10") public static native @ByVal @Cast("c10::IValueComparator*") Pointer getGreaterThanComparator(@Const @ByRef IValue v); -// Targeting ../Tuple.java +// aten::cross(Tensor self, Tensor other, int? 
dim=None) -> Tensor -// Targeting ../Future.java +// aten::triu(Tensor self, int diagonal=0) -> Tensor -// Targeting ../Await.java +// aten::tril(Tensor self, int diagonal=0) -> Tensor -// Targeting ../ConstantString.java +// aten::trace(Tensor self) -> Tensor -// Targeting ../Object.java +// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor -// Targeting ../PyObjectHolder.java +// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor -// Targeting ../EnumHolder.java +// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Targeting ../ComplexHolder.java +// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../StreamData3Holder.java +// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor +// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor - // namespace ivalue -// This is an owning wrapper for a c10::optional> -// that can be implicitly converted to a (non-owning) optional>. -// Its purpose is to be used in generated code to keep the vector alive -// either until the end of a statement (as a temporary), or as a saved arg -// in autograd. -// Targeting ../Capsule.java +// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// IValue is the generic tagged union used by the interpreter to hold -// all value types. -// It is a 16-byte object with an 8-byte payload and an 8-byte tag. -// The tag is currently 4 bytes to determine the type, and 1 byte -// to mark whether that type is a subtype of c10::intrusive_ptr_target and needs -// retain/release calls. +// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor -/// -/// -/// -/// -/// -// #define TORCH_FORALL_TAGS(_) -// _(None) -// _(Tensor) -// _(Storage) -// _(Double) -// _(ComplexDouble) -// _(Int) -// _(SymInt) -// _(SymFloat) -// _(Bool) -// _(Tuple) -// _(String) -// _(Blob) -// _(GenericList) -// _(GenericDict) -// _(Future) -// _(Await) -// _(Device) -// _(Stream) -// _(Object) -// _(PyObject) -// _(Uninitialized) -// _(Capsule) -// _(RRef) -// _(Quantizer) -// _(Generator) -// _(Enum) -// Targeting ../IValue.java +// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor -// Targeting ../WeakIValue.java +// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor -// Targeting ../StrongTypePtr.java +// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor -// Targeting ../WeakTypePtr.java +// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Targeting ../WeakOrStrongCompilationUnit.java +// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../WeakOrStrongTypePtr.java +// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor +// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor - // namespace c10 -// #include // IWYU pragma: keep +// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Parsed from ATen/core/ivalue_to.h +// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// #pragma once -// #include - // namespace at -// Targeting ../ivalue_to_const_ref_overload_return.java +// aten::le.Scalar(Tensor self, Scalar other) -> Tensor +// aten::le.Tensor(Tensor self, Tensor other) -> Tensor - // namespace detail - // namespace c10 +// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Parsed from ATen/core/operator_name.h -// #pragma once +// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
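As the comment above explains, IValue is the interpreter's tagged union: one tag, one payload at a time. A minimal sketch of wrapping and unwrapping a primitive, assuming the generated IValue class mirrors the C++ constructors and is*/to* accessors:

import org.bytedeco.pytorch.IValue;

public class IValueDemo {
    public static void main(String[] args) {
        IValue v = new IValue(3.5);           // tag becomes Double
        if (v.isDouble()) {                   // always check the tag before unwrapping
            System.out.println(v.toDouble()); // 3.5
        }
        System.out.println(v.isTensor());     // false: only one tag is active
    }
}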
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// Targeting ../OperatorName.java
+// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
-// Targeting ../OperatorNameView.java
+// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef OperatorName opName);
+// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor
- // namespace c10
+// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor
+// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
-// Parsed from ATen/core/qualified_name.h
-// #pragma once
+// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// #include
-// #include
-// #include
-// #include
-// #include
-// Targeting ../QualifiedName.java
+// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor
- // namespace c10
- // namespace std
+// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor
-// Parsed from ATen/core/stack.h
-// #pragma once
+// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
-// #include
-// #include
-// #include
-// #include
+// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
-// TODO move this to c10 namespace
-// Targeting ../Operation.java
+// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
-// An operation with N inputs and M outputs pops the last N inputs off
-// the stack and pushes its M outputs onto the stack
-// before: I0, I1, ... IN <- stack.back()
-// after: O0, O1, ... OM
-// operations are defined this way so that ownership of inputs can be
-// transferred to the operation and it can incrementally drop ownership of
-// tensors when they become unneeded. For large operations, like 'run an entire
-// subgraph', this functionality is very important for minimizing gpu memory
-// usage. The return value is the relative 'offset' to jump to for the next
-// operation:
-// pc += 1 + offset
-// so a return value of 0 goes to the next instruction
+// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
-// treat the last N elements of the stack as a list, looking up
-// element i
-@Namespace("torch::jit") public static native @ByRef IValue peek(@ByRef IValueVector stack, @Cast("size_t") long i, @Cast("size_t") long N);
-// treat the last N elements of the stack as a list, looking up the
-// slice starting at index i and having length len
-@Namespace("torch::jit") public static native @ByVal @Cast("at::ArrayRef*") IValueArrayRef peekSlice(
-    @Const @ByRef IValueVector stack,
-    @Cast("size_t") long i,
-    @Cast("size_t") long len,
-    @Cast("size_t") long N);
-@Namespace("torch::jit") public static native @ByVal @Cast("at::ArrayRef*") IValueArrayRef last(@Const @ByRef IValueVector stack, @Cast("size_t") long N);
-@Namespace("torch::jit") public static native void drop(@ByRef IValueVector stack, @Cast("size_t") long n);
-@Namespace("torch::jit") public static native @ByVal IValue pop(@ByRef IValueVector stack);
-@Namespace("torch::jit") public static native @ByVal IValueVector pop(@ByRef IValueVector stack, @Cast("size_t") long n);
-// variadic pop:
-// int64_t a; at::Tensor b;
-// pop(stack, a, b);
-// equivalent to:
-// b = pop(stack).toTensor();
-// a = pop(stack).toInt();
+// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
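The peek/pop/drop helpers whose signatures appear above implement the stack discipline just described. An illustrative sketch, assuming these natives land on org.bytedeco.pytorch.global.torch and that the generated IValueVector wrapper exposes push_back (both are assumptions about the generated API):

import org.bytedeco.pytorch.IValue;
import org.bytedeco.pytorch.IValueVector;
import static org.bytedeco.pytorch.global.torch.*;

public class StackDemo {
    public static void main(String[] args) {
        IValueVector stack = new IValueVector();
        stack.push_back(new IValue(1.0));   // assumed vector-wrapper method
        stack.push_back(new IValue(2.0));
        IValue top = peek(stack, 1, 2);     // element 1 of the last 2 entries: 2.0
        System.out.println(top.toDouble());
        IValue popped = pop(stack);         // removes and returns the 2.0 entry
        drop(stack, 1);                     // discards the remaining entry
        System.out.println(stack.size());   // 0
    }
}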
-@Namespace("torch::jit") public static native void push_one(@ByRef IValueVector stack, @ByVal TensorOptions options); -// The packer here is carefully written not to make any unnecessary -// copies. +// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// pack takes the return values of aten functions pushes them onto the stack - // namespace jit - // namespace torch +// aten::less.Scalar(Tensor self, Scalar other) -> Tensor -// Parsed from ATen/core/alias_info.h +// aten::less.Tensor(Tensor self, Tensor other) -> Tensor -// #pragma once -// #include -// #include -// #include -// #include -// #include -// Targeting ../AliasInfo.java +// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// this does match the way things are represented in the schema +// aten::take(Tensor self, Tensor index) -> Tensor - // namespace c10 +// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor -// Parsed from ATen/core/jit_type_base.h +// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor -// #pragma once -// #include -// #include -// #include -// #include +// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #define C10_FORALL_TYPES(_) -// _(AnyType) -// _(EnumType) -// _(AnyEnumType) -// _(TensorType) -// _(StorageType) -// _(TupleType) -// _(ListType) -// _(DictType) -// _(NumberType) -// _(FloatType) -// _(ComplexType) -// _(FutureType) -// _(AwaitType) -// _(RRefType) -// _(IntType) -// _(NoneType) -// _(StringType) -// _(GeneratorType) -// _(QuantizerType) -// _(BoolType) -// _(OptionalType) -// _(VarType) -// _(DeviceObjType) -// _(StreamObjType) -// _(FunctionType) -// _(ClassType) -// _(PyObjectType) -// _(CapsuleType) -// _(InterfaceType) -// _(QSchemeType) -// _(ScalarTypeType) -// _(LayoutType) -// _(MemoryFormatType) -// _(AnyListType) -// _(AnyTupleType) -// _(AnyClassType) -// _(SymIntType) -// _(SymFloatType) -// _(UnionType) -// _(DynamicType) +// aten::masked_select(Tensor self, Tensor mask) -> Tensor -@Namespace("c10") public enum TypeKind { - AnyType(0), - EnumType(1), - AnyEnumType(2), - TensorType(3), - StorageType(4), - TupleType(5), - ListType(6), - DictType(7), - NumberType(8), - FloatType(9), - ComplexType(10), - FutureType(11), - AwaitType(12), - RRefType(13), - IntType(14), - NoneType(15), - StringType(16), - GeneratorType(17), - QuantizerType(18), - BoolType(19), - OptionalType(20), - VarType(21), - DeviceObjType(22), - StreamObjType(23), - FunctionType(24), - ClassType(25), - PyObjectType(26), - CapsuleType(27), - InterfaceType(28), - QSchemeType(29), - ScalarTypeType(30), - LayoutType(31), - MemoryFormatType(32), - AnyListType(33), - AnyTupleType(34), - AnyClassType(35), - SymIntType(36), - SymFloatType(37), - UnionType(38), - DynamicType(39); - public final int value; - private TypeKind(int v) { this.value = v; } - private TypeKind(TypeKind e) { this.value = e.value; } - public TypeKind intern() { for (TypeKind e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} +// aten::nonzero(Tensor self) -> Tensor -@Namespace("c10") public static native @Cast("const char*") BytePointer typeKindToString(TypeKind kind); -@Namespace("c10") public static native String typeKindToString(@Cast("c10::TypeKind") int kind); -// Use this to 
customize how a Type is printed using `annotation_str()`. If -// c10::nullopt is returned, `annotation_str()` falls through to its default -// implementation. - // namespace detail -// #define TORCH_DECLARE_SINGLETON(Type) -// struct Type; -// namespace detail { -// template <> struct IsSingletonType : public std::integral_constant {}; -// } - - - - - - - - - - - - - - - - - - - - - - - +// aten::nonzero_numpy(Tensor self) -> Tensor[] -// Targeting ../Type.java +// aten::argwhere(Tensor self) -> Tensor +// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor +// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor +// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor +// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) +// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor +// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) +// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) +// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) +// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) +// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!) +// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) -// Explicitly enable MaybeOwned>, rather than allowing -// MaybeOwned to be used for any type right away. -// Targeting ../SharedType.java +// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) +// aten::cholesky(Tensor self, bool upper=False) -> Tensor +// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor -// Targeting ../NamedType.java +// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor +// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) - // namespace c10 - // namespace std +// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau) -// Parsed from ATen/core/jit_type.h -// #pragma once +// aten::orgqr(Tensor self, Tensor input2) -> Tensor -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - // namespace jit - // namespace torch +// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor +// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor -@Namespace("c10") public static native @Cast("bool") boolean is_contiguous_strides( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides); -@Namespace("c10") public static native @Cast("bool") boolean is_contiguous_strides( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); -// Targeting ../AnyType.java +// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor +// aten::lgamma_(Tensor(a!) self) -> Tensor(a!) -@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef Type type); -// Shim for compatibility with code that uses TypePtr. 
-@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef Type.TypePtr typePtr); +// aten::lgamma(Tensor self) -> Tensor -// Targeting ../AwaitSingleElementType.java +// aten::digamma(Tensor self) -> Tensor -// Targeting ../ListSingleElementType.java +// aten::polygamma(int n, Tensor self) -> Tensor -// Targeting ../RRefSingleElementType.java +// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!) -// Targeting ../FutureSingleElementType.java +// aten::erfinv(Tensor self) -> Tensor -// Targeting ../OptionalSingleElementType.java +// aten::erfinv_(Tensor(a!) self) -> Tensor(a!) -// Targeting ../UnionType.java +// aten::i0(Tensor self) -> Tensor -// Targeting ../OptionalType.java +// aten::i0_(Tensor(a!) self) -> Tensor(a!) -// Targeting ../Stride.java +// aten::sign(Tensor self) -> Tensor +// aten::sign_(Tensor(a!) self) -> Tensor(a!) -@Namespace("c10") public static native @ByVal StrideOptional merge_primitive( - @Const @ByRef StrideOptional a, - @Const @ByRef StrideOptional b); -// Targeting ../ShapeSymbol.java +// aten::signbit(Tensor self) -> Tensor -@Namespace("c10") public static native @ByVal ShapeSymbol merge_primitive( - @Const @ByRef ShapeSymbol a, - @Const @ByRef ShapeSymbol b); -// Targeting ../SymbolicShape.java +// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor -@Namespace("c10::detail") public static native @Cast("bool") boolean isComplete(@Const @ByRef Stride s); +// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../LongVaryingShape.java +// aten::atan2(Tensor self, Tensor other) -> Tensor -// Targeting ../StrideVaryingShape.java +// aten::arctan2(Tensor self, Tensor other) -> Tensor -// TODO: investigate making this SingletonOrSharedTypePtr -// Targeting ../TensorType.java +// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../ListType.java +// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor -// Targeting ../DictType.java +// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor -// Targeting ../FutureType.java +// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor -// Targeting ../AwaitType.java +// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) -// Targeting ../RRefType.java +// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) -// Any should never appear in a named type like a class, namedtuple or -// interface. If it does, then dynamic type information will be lost in the -// Pickler, leading to hard-to-track-down bugs that will only occur -// after saving or loading a model. This is because we rely on the -// static types in named types to reconstruct type tags of loaded -// values. Lifting this restriction requires solving the serialization -// problem first. -@Namespace("c10") public static native void checkNoAny( - @Const @ByRef Type base, - @Cast("const char*") BytePointer what, - @StdString BytePointer attrname, - @Const @ByRef Type.TypePtr attrtype); -@Namespace("c10") public static native void checkNoAny( - @Const @ByRef Type base, - String what, - @StdString String attrname, - @Const @ByRef Type.TypePtr attrtype); -// Targeting ../TupleType.java +// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor +// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
-// the common supertype of all Enums, only used in operator registraion. -// EnumType <: AnyEnumType for all Enums -// Targeting ../AnyEnumType.java +// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor -// Targeting ../NumberType.java +// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../FloatType.java +// aten::hypot(Tensor self, Tensor other) -> Tensor -// Targeting ../ComplexType.java +// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) -// We need to introduce `SymIntType` to represent the `SymInt` type -// used in function schemas e.g. `aten::narrow_copy(... SymInt length) -// `SymInt` will be used to enable tracing arithmetic operations on -// dimension values. Please see [SymInt.h] for more information -// Targeting ../SymIntType.java +// aten::igamma(Tensor self, Tensor other) -> Tensor -// Targeting ../SymFloatType.java +// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../IntType.java +// aten::igammac(Tensor self, Tensor other) -> Tensor -// Targeting ../BoolType.java +// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../StringType.java +// aten::nextafter(Tensor self, Tensor other) -> Tensor -// Targeting ../StorageType.java +// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../FunctionType.java +// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor -// Targeting ../NoneType.java +// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) -// Targeting ../GeneratorType.java +// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor -// Targeting ../QuantizerType.java +// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -// Targeting ../QSchemeType.java +// aten::min(Tensor self) -> Tensor -// Targeting ../DeviceObjType.java +// aten::fmin(Tensor self, Tensor other) -> Tensor -// Targeting ../StreamObjType.java +// aten::max(Tensor self) -> Tensor -// Targeting ../VarType.java +// aten::fmax(Tensor self, Tensor other) -> Tensor -// Targeting ../CapsuleType.java +// aten::maximum(Tensor self, Tensor other) -> Tensor -// Targeting ../PyObjectType.java +// aten::max.other(Tensor self, Tensor other) -> Tensor +// aten::minimum(Tensor self, Tensor other) -> Tensor -@Namespace("c10") public enum TypeVerbosity { - None(0), - Type(1), - TypeAndStride(2), - Full(3), - Symbolic(4), - Default(Full.value); - public final int value; - private TypeVerbosity(int v) { this.value = v; } - private TypeVerbosity(TypeVerbosity e) { this.value = e.value; } - public TypeVerbosity intern() { for (TypeVerbosity e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} +// aten::min.other(Tensor self, Tensor other) -> Tensor -@Namespace("c10") public static native TypeVerbosity type_verbosity(); +// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor -// what is the type, ignoring extra size/shape information? -// e.g. Tensor(2x3) -> Dynamic, and Tuple(Tensor(2x3),...) -> Tuple(Dynamic,...) -// `unshapedType` is used to remove Tensor subtypes. 
We treat all Tensor -// subtypes as simply "Tensor"; we also create a new version of any -// container types in which internal Tensors have undergone the same -// operation. This is used for type comparisons between two Tensor types -// (`unshapedType` means that we don't falsely return `false` for e.g. -// Tensors of different dimensions). It's also used in the alias -// analysis pass. -// Be careful with calls because this can be very slow. If calling this -// on a graph, use `EraseShapeInformation` in shape_analysis.h -@Namespace("c10") public static native @ByVal Type.TypePtr unshapedType(@Const @ByRef Type.TypePtr type); +// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) -@Namespace("c10") public static native @ByVal ScalarTypeOptional tryScalarTypeFromJitType(@Const @ByRef Type type); +// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) -@Namespace("c10") public static native ScalarType scalarTypeFromJitType(@Const @ByRef Type type); -// Attempt to find the correct supertype of the two types `t1` and `t2`. -// If no supertype is found, then nullopt will be returned if -// `default_to_union` is false, and `Union[t1, t2]` will be returned -// if it is true. If `t1 == t2`, or `t1` is a type refinement of `t2`, -// then `t2` will be returned (and vice versa). -// -// Two different tensortypes will return dynamic. -// -// Currently we chose not to support returning a NumberType for -// two types from the set of {FloatType, IntType, ComplexType}, because -// there is a lack of operator support for NumberType. -// -// If `type_hint` is an `InterfaceType`, then we can use that as a -// potential supertype for `ClassType`s in the list. Otherwise, we have -// no way to find and use some common interface type -@Namespace("c10") public static native @ByVal TypePtrOptional unifyTypes( - @Const @ByRef Type.TypePtr t1, - @Const @ByRef Type.TypePtr t2, - @Cast("bool") boolean default_to_union/*=false*/, - @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); -@Namespace("c10") public static native @ByVal TypePtrOptional unifyTypes( - @Const @ByRef Type.TypePtr t1, - @Const @ByRef Type.TypePtr t2); +// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) -@Namespace("c10") public static native @ByVal TypePtrOptional unifyTypeList( - @ByVal TypeArrayRef elements, - @Cast("std::ostream*") @ByRef Pointer why_not, - @Cast("bool") boolean default_to_union/*=false*/, - @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint); -@Namespace("c10") public static native @ByVal TypePtrOptional unifyTypeList( - @ByVal TypeArrayRef elements, - @Cast("std::ostream*") @ByRef Pointer why_not); -// Targeting ../getTypePtr_.java +// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) - // namespace detail -// Targeting ../MatchTypeReturn.java +// aten::msort(Tensor self) -> Tensor -// attempt to match the type variables in formal to actual, adding them to type_env. -// If no match is possible this returns a MatchTypeReturn with r.success() == false -// and a r.reason() that describes why it could not match. -// note: It is possible to successfully match a formal, but for type variables -// in the formal to still not be defined. 
In particular, None matches Optional[T] -// but does not define the value of T. -@Namespace("c10") public static native @ByVal MatchTypeReturn matchTypeVariables(@Const @ByRef Type.TypePtr formal, @Const @ByRef Type.TypePtr actual, @ByRef TypeEnv type_env); +// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor -// replace type variables appearing in `type` with the values in -// `type_env`. Returns nullptr if a variable used in `type` -// does not appear in `type_env` -@Namespace("c10") public static native @ByVal Type.TypePtr tryEvalTypeVariables(@Const @ByRef Type.TypePtr type, @ByRef TypeEnv type_env); -@Namespace("c10") public static native @Cast("bool") boolean elementTypeCanBeInferredFromMembers(@Const @ByRef Type.TypePtr elem_type); -// Targeting ../InterfaceType.java +// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor -// Targeting ../LayoutEnumerationType.java +// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor -// Targeting ../ScalarTypeEnumerationType.java +// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) -// Targeting ../MemoryFormattEnumerationType.java +// aten::all(Tensor self) -> Tensor +// aten::any(Tensor self) -> Tensor -// WARNING: These enumeration types below DO NOT actually get parsed out -// from the logical schema strings, instead they are mapped as ints. To -// observe these types, use real_type() instead of type() on Argument -// Targeting ../ScalarTypeType.java +// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor -// Targeting ../MemoryFormatType.java +// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) -// Targeting ../LayoutType.java +// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) - // namespace detail -// the common supertype of all lists, -// List[T] <: AnyList for all T -// Targeting ../AnyListType.java +// aten::equal(Tensor self, Tensor other) -> bool +// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor -// the common supertype of all tuples, -// Tuple[T...] <: AnyTuple for all T -// Targeting ../AnyTupleType.java +// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor -// the common supertype of all classes, -// ClassType <: AnyClassType for all classes -// Targeting ../AnyClassType.java +// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) +// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) +// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor -// Targeting ../InferredType.java +// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) +// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) -@Namespace("c10") public static native @Cast("bool") boolean containsAnyType(@Const @ByRef Type.TypePtr type); - // namespace c10 +// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) -// Parsed from ATen/core/function_schema.h +// aten::alias(Tensor(a) self) -> Tensor(a) -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::isfinite(Tensor self) -> Tensor -// schema as used in the compiler for resolving function calls and reporting -// errors. 
These objects should be constructed from C10 schema once those -// are available. +// aten::isinf(Tensor self) -> Tensor -// Targeting ../Argument.java +// aten::record_stream(Tensor(a!) self, Stream s) -> () +// aten::isposinf(Tensor self) -> Tensor +// aten::isneginf(Tensor self) -> Tensor -@Namespace("c10") public enum SchemaArgType { input(0), output(1); +// aten::det(Tensor self) -> Tensor - public final int value; - private SchemaArgType(int v) { this.value = v; } - private SchemaArgType(SchemaArgType e) { this.value = e.value; } - public SchemaArgType intern() { for (SchemaArgType e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -// Targeting ../SchemaArgument.java +// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) +// aten::logdet(Tensor self) -> Tensor -// Targeting ../FunctionSchema.java +// aten::inverse(Tensor self) -> Tensor +// aten::inner(Tensor self, Tensor other) -> Tensor +// aten::outer(Tensor self, Tensor vec2) -> Tensor -// print out Argument, which is compatible with FunctionSchema parser -// full format: Type(alias)? name=default_value +// aten::ger(Tensor self, Tensor vec2) -> Tensor +// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor -@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef FunctionSchema schema); +// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor + // namespace at // namespace c10 - // namespace std -// #include // IWYU pragma: keep -// Parsed from ATen/core/function.h + // namespace at + + +// Parsed from ATen/core/Tensor.h // #pragma once -// #include -// #include -// #include +// #include // #include -// #include - -@Namespace("at") public static native void launch(@ByVal Func func); +// Targeting ../OptionalTensorRef.java -// Targeting ../RecursiveMethodCallError.java -@Namespace("torch::jit") public static native void preoptimizeGraph(@SharedPtr @ByRef Graph graph, @Cast("bool") boolean disable_autocast/*=false*/); -@Namespace("torch::jit") public static native void preoptimizeGraph(@SharedPtr @ByRef Graph graph); -// Targeting ../Function.java - // namespace jit - // namespace torch + // namespace at -// Parsed from ATen/core/boxing/KernelFunction.h +// Parsed from ATen/Tensor.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../OperatorKernel.java +// #include -// Targeting ../KernelFunction.java +// Parsed from torch/csrc/autograd/function_hook.h +// #pragma once +// #include +// #include +// #include +// A hook that's called on gradients +// Targeting ../FunctionPreHook.java -// #include +// Targeting ../FunctionPostHook.java -// Parsed from ATen/core/dispatch/CppSignature.h -// #pragma once + // namespace autograd + // namespace torch -// #include -// #include -// #include -// #include -// #include -// Targeting ../CppSignature.java +// Parsed from torch/csrc/autograd/cpp_hook.h +// #pragma once +// #include +// #include +// #include + // namespace autograd + // namespace torch +// Parsed from c10/util/hash.h +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include -// Parsed from ATen/core/dispatch/DispatchKeyExtractor.h +// NOTE: hash_combine and SHA1 hashing is based on implementation from Boost +// +// Boost Software License - Version 1.0 - August 17th, 2003 +// +// Permission is hereby granted, free of charge, to any person or 
organization +// obtaining a copy of the software and accompanying documentation covered by +// this license (the "Software") to use, reproduce, display, distribute, +// execute, and transmit the Software, and to prepare derivative works of the +// Software, and to permit third-parties to whom the Software is furnished to +// do so, all subject to the following: +// +// The copyright notices in the Software and this entire statement, including +// the above license grant, this restriction and the following disclaimer, +// must be included in all copies of the Software, in whole or in part, and +// all derivative works of the Software, unless such copies or derivative +// works are solely in the form of machine-executable object code generated by +// a source language processor. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +@Namespace("c10") public static native @Cast("size_t") long hash_combine(@Cast("size_t") long seed, @Cast("size_t") long value); + +// Creates the SHA1 hash of a string. A 160-bit hash. +// Based on the implementation in Boost (see notice above). +// Note that SHA1 hashes are no longer considered cryptographically +// secure, but are the standard hash for generating unique ids. +// Usage: +// // Let 'code' be a std::string +// c10::sha1 sha1_hash{code}; +// const auto hash_code = sha1_hash.str(); +// TODO: Compare vs OpenSSL and/or CryptoPP implementations -// #pragma once +//////////////////////////////////////////////////////////////////////////////// +// c10::hash implementation +//////////////////////////////////////////////////////////////////////////////// -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Use template argument deduction to shorten calls to c10::hash -// Take a DispatchKeySet for a Tensor and determine what the actual dispatch -// DispatchKey should be, taking into account TLS, and skipping backends which -// fall through. -// -// Unlike Tensor::key_set(), the value of this on a tensor can change depending -// on TLS. -// -// NB: If there is no valid dispatch key, this will return Undefined -@Namespace("c10::impl") public static native @ByVal DispatchKeySet computeDispatchKeySet( - @ByVal DispatchKeySet ks, - @ByVal DispatchKeySet key_mask -); +// Use SFINAE to dispatch to std::hash if possible, cast enum types to int +// automatically, and fall back to T::hash otherwise. NOTE: C++14 added support +// for hashing enum types to the standard, and some compilers implement it even +// when C++14 flags aren't specified. This is why we have to disable this +// overload if T is an enum type (and use the one below in this case). + // namespace _hash_detail - // A small gadget to extract the DispatchKeySet from types which are known - // to have it. Used to extract dispatch keys from unboxed calls. +// Hasher struct - // NB: take by const reference (Don't do universal forwarding here! You - // don't want to move into this function!) 
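The `hash_combine` binding above is callable directly from Java. A minimal sketch, assuming it lands in `org.bytedeco.pytorch.global.torch` like the other `c10`-namespace functions:

import org.bytedeco.pytorch.global.torch;

public class HashCombineExample {
    public static void main(String[] args) {
        // Fold several values into one seed, mirroring the Boost-style
        // hash_combine documented above.
        long seed = 0;
        seed = torch.hash_combine(seed, 42);
        seed = torch.hash_combine(seed, 7);
        System.out.println("combined hash = " + seed);
    }
}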
+// Specialization for std::tuple -// Targeting ../DispatchKeyExtractor.java +// Specialization for std::vector + // namespace _hash_detail +// Use this function to actually hash multiple things in one line. +// Dispatches to c10::hash, so it can hash containers. +// Example: +// +// static size_t hash(const MyStruct& s) { +// return get_hash(s.member1, s.member2, s.member3); +// } +// Specialization for c10::complex + // namespace c10 -// Parsed from ATen/core/dispatch/RegistrationHandleRAII.h +// Parsed from torch/csrc/autograd/edge.h // #pragma once +// #include // #include -// Targeting ../RegistrationHandleRAII.java +// #include +// #include +// Targeting ../Edge.java + // namespace autograd + // namespace torch +// The idiomatic way of enabling use of a custom type as the key of hash +// containers in C++11. This method removes the requirement of having to pass +// a custom hasher to std::unordered_{map, set}. +// See http://en.cppreference.com/w/cpp/utility/hash for more information. + // namespace std -// Parsed from ATen/core/dispatch/OperatorOptions.h +// Parsed from torch/csrc/autograd/forward_grad.h // #pragma once -// #include +// #include -@Namespace("c10") public enum AliasAnalysisKind { - INTERNAL_SPECIAL_CASE((byte)(0)), - CONSERVATIVE((byte)(1)), // The most conservative alias analysis type, assumes - // side-effects. This is the default analysis. - FROM_SCHEMA((byte)(2)), - PURE_FUNCTION((byte)(3)); +// [ Using ForwardGrad ] +// ForwardGrad needs to be a shared_ptr to satisfy constraints of its inner +// design. But this shared_ptr must be uniquely associated with the object that +// stores it (as of writing, either AutogradMeta or SavedVariable). This object +// is called the "owning object" in the discussions below. This owning object +// must call `ForwardGrad::clear()` when it is destroyed to ensure that the +// ForwardGrad is properly de-allocated. - public final byte value; - private AliasAnalysisKind(byte v) { this.value = v; } - private AliasAnalysisKind(AliasAnalysisKind e) { this.value = e.value; } - public AliasAnalysisKind intern() { for (AliasAnalysisKind e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} +// This file contains two classes that are used to store forward AD gradients +// and ensure that they are scoped properly. Because forward AD runs +// concurrently with the evaluation of the function, we need a mechanism to +// separate different forward AD invocations and be able to compute the right +// gradients. We model such invocations as levels here. The particular scoping +// issue mentioned above has two main drivers: +// - Ensure that we can conveniently use forward AD within a high level API +// without +// leaking the forward AD states outside. +// - Ensure that we can keep the level that we expose to the user API simple +// (an integer +// that represents the nesting depth) while avoiding confusions when the +// level index is re-used. -// #if !defined(_MSC_VER) -@Namespace("c10") public static native @Cast("const char*") BytePointer toString(AliasAnalysisKind aliasAnalysisKind); +// The important external APIs from this file are: +// - ForwardADLevel::get_next_idx() that can be used to enter a new level and +// get its index +// - ForwardADLevel::release_idx() that can be used to exit a given level. +// - ForwardGrad() can be used to store a given forward gradient that will +// handle the level +// tracking automatically. 
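A minimal sketch of the level API listed above, assuming JavaCPP maps the static members `get_next_idx()` and `release_idx()` of the generated `ForwardADLevel` class one-to-one (the names come from the comments above; the exact Java signatures are in ForwardADLevel.java):

import org.bytedeco.pytorch.ForwardADLevel;

public class ForwardADLevelSketch {
    public static void main(String[] args) {
        // Enter a new forward-AD level; the index is the simple user-facing
        // handle (a nesting depth) described above.
        long idx = ForwardADLevel.get_next_idx();
        try {
            // ... compute with forward-mode tangents registered at this level ...
        } finally {
            // Exiting the level clears every tangent stored for it in the
            // registered ForwardGrads.
            ForwardADLevel.release_idx(idx);
        }
    }
}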
- // namespace c10

+// The basic implementation strategy is as follows:
+// Every tensor has a ForwardGrad, maintaining a map from levels to tangents.
+// ForwardGrad is responsible for registering itself with the appropriate
+// ForwardADLevel when a new tangent is added to it via ForwardGrad::set_value,
+// and for un-registering itself from that level if the tangent is removed via
+// ForwardGrad::reset. The ForwardADLevel is created when a new level is entered
+// via ForwardADLevel::get_next_idx. A reference to the new ForwardADLevel is
+// stored in a global (for the whole process) vector that ensures it can be
+// accessed via ForwardADLevel::get_by_idx. This reference is deleted when the
+// index is released by the user by calling ForwardADLevel::release_idx. When
+// it is destructed, the ForwardADLevel is responsible for clearing all the
+// tangents for its level stored in all the ForwardGrad instances that
+// registered with it.
+//
+// This process-wide level design, compared to a thread-local one, allows us to
+// use a very simple user-facing handle for the level (an int) while enabling
+// cross-thread forward AD. The only synchronization required of the user is
+// when entering and exiting levels. Some discussion of alternative designs
+// is in https://github.com/pytorch/pytorch/pull/49097#discussion_r543716453 and
+// can be refined in the future.
+
+// Correctness of concurrency:
+// Each class uses its own lock when reading or modifying internal storage.
+// In particular, this allows tangents to be safely removed from ForwardGrad
+// when the ForwardADLevel is being exited. We ensure there is no deadlock by
+// ensuring that a method never calls into another class's method while the
+// local class's lock is held, except in one single case: calling from
+// ForwardADLevel's destructor into ForwardGrad::reset with update_level=false.
+
+// The lifetime of these objects is as follows:
+// The ForwardADLevel can be in three states:
+//     - Initialized: one of its references is held by the global vector, and
+//       there may be more references held by temporary variables in
+//       ForwardGrad's methods.
+//     - About to be destructed: "release_idx" has been called, and the only
+//       reason the ForwardADLevel is not destructed right away is that some
+//       methods in ForwardGrad hold an owning reference to it. This is done so
+//       that a ForwardADLevel can never be destructed while a ForwardGrad is
+//       registered with it and in the process of adding something to its
+//       internal state.
+//     - Being destructed: here the ForwardADLevel is not referenced anymore
+//       and can safely reset all of the ForwardGrads. Note that more than one
+//       reset can be called here (which is ok), but we are guaranteed that
+//       there is at least one.
+// The ForwardGrad is simpler, as there is no intermediary state and no special
+// destructor logic: it is unregistered from the different ForwardADLevels when
+// the owning object (AutogradMeta or SavedVariable) is destroyed.
+
+// Other considered design:
+// To avoid needing ForwardGrad::clear, we considered storing weak_ptrs inside
+// the ForwardADLevel. While this would work, it would mean that the set inside
+// the ForwardADLevel would only grow unless we did an expensive linear scan to
+// remove all the dangling weak pointers. Hence this approach was not used.
-// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Data structures in this file are optimized for this maximum number of levels. +// The number of levels corresponds to the degree of the gradient being +// computed using forward AD and we don't expect more than second order +// gradients to be common. +public static final int EXPECTED_MAX_LEVEL = 2; +// Targeting ../ForwardADLevel.java -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../ForwardGrad.java -// #ifdef C10_MOBILE -// #endif -// This data structure represents a kernel that was registered to us from a -// user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata -// about the kernel that isn't necessary for actual dispatching (this is why -// we don't put AnnotatedKernel in the actual DispatchTable), but is useful for -// giving good error messages. -// Targeting ../AnnotatedSchema.java + // namespace autograd + // namespace torch -// Internal data structure that records information about a specific operator. -// It's not part of the public API; typically, users will interact with -// OperatorHandle instead. -// -// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher -// lock (this is important because some methods in OperatorEntry access -// dispatcher state) +// Parsed from ATen/NamedTensor.h - // namespace impl - // namespace c10 +// #include -// Parsed from ATen/core/dispatch/Dispatcher.h +// Parsed from ATen/core/ivalue_to.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include + // namespace at +// Determine the return type of `IValue::to() const &`. It's a const +// reference when possible and a copy otherwise. It is in this +// separate header so that List can use it as well. -// #include -// #include + // namespace detail + // namespace c10 -@Namespace("c10") public static native @Cast("bool") boolean show_dispatch_trace(); -@Namespace("c10") public static native void dispatch_trace_nesting_incr(); -@Namespace("c10") public static native void dispatch_trace_nesting_decr(); -@Namespace("c10") public static native @Cast("int64_t") long dispatch_trace_nesting_value(); -// Targeting ../DispatchTraceNestingGuard.java +// Parsed from ATen/core/qualified_name.h -// Targeting ../OpRegistrationListener.java +// #pragma once +// #include +// #include +// #include +// #include +// #include +// Targeting ../QualifiedName.java -// Targeting ../RegistrationListenerList.java + // namespace c10 + // namespace std -// Targeting ../SchemaRegistrationHandleRAII.java +// Parsed from ATen/core/type_ptr.h +// #pragma once -// Targeting ../Dispatcher.java +// #include +// #include +// #include +// #include +// Targeting ../SingletonTypePtr.java -// Targeting ../OperatorHandle.java +// Targeting ../AnyTypePtr.java -/** - * This is a handle to an operator schema registered with the dispatcher. - * It holds the same information as an OperatorHandle, but it is templated - * on the operator arguments and allows calling the operator in an - * unboxed way. - */ +// Targeting ../AnyEnumTypePtr.java -// CaptureKernelCall is intended to capture return values from Dispatcher -// unboxed kernel calls. A record function may request to get outputs from the -// kernel calls. 
For boxed kernels, it's straightforward, the returned values -// are in the stack object. The stack can be passed to record functions. For -// unboxed kernels, we need to handle different kinds of return values, cache -// them temporarily, then release the values for the actual function call -// return. -// Handle the lvalue reference differently since it should not be moved. +// Targeting ../NumberTypePtr.java -// Handle case where the kernel returns void. +// Targeting ../FloatTypePtr.java - // namespace detail -// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +// Targeting ../ComplexTypePtr.java -// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +// Targeting ../IntTypePtr.java -// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +// Targeting ../BoolTypePtr.java +// Targeting ../StringTypePtr.java -// NB: this doesn't count as a "true" dispatcher jump, so no instrumentation +// Targeting ../StorageTypePtr.java +// Targeting ../NoneTypePtr.java - // namespace c10 +// Targeting ../GeneratorTypePtr.java - // namespace std +// Targeting ../QuantizerTypePtr.java -// Parsed from ATen/core/op_registration/op_allowlist.h -// #pragma once +// Targeting ../QSchemeTypePtr.java -// TODO: unify to C10_MOBILE. In theory this header could be used in OSS. -// #ifdef TEMPLATE_SELECTIVE_BUILD -// #include -// #endif -/** - * This header implements functionality to build PyTorch with only a certain - * set of operators (+ dependencies) included. - * - * - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these - * two ops will be included in your build. The allowlist records operators - * only, no overloads; if you include aten::add, all overloads of aten::add - * will be included. - * - * Internally, this is done by removing the operator registration calls - * using compile time programming, and the linker will then prune all - * operator functions that weren't registered. - * See Note [Selective build] for more details - * - * WARNING: The allowlist mechanism doesn't work for all ways you could go about - * registering an operator. If the dispatch key / operator name is not - * sufficiently obvious at compile time, then the allowlisting mechanism - * will fail (and the operator will be included in the binary anyway). - */ +// Targeting ../DeviceObjTypePtr.java -// #include -// #include -// #include +// Targeting ../StreamObjTypePtr.java -// #if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) -// #include -// #endif -@Namespace("c10::impl") public static native @Cast("const bool") boolean allowlist_contains(@ByVal @Cast("c10::string_view*") Pointer allowlist, @ByVal @Cast("c10::string_view*") Pointer item); // Forward Declare +// Targeting ../CapsuleTypePtr.java -/** - * In selective build mode returns true/false depending on whether a build - * feature is available or not. - * - * In instrumenting mode (tracing mode), always returns true, and doesn't - * trigger any side effects. - */ -@Namespace("c10::impl") public static native @Cast("const bool") boolean is_build_feature_available(@Cast("const char*") BytePointer name); -@Namespace("c10::impl") public static native @Cast("const bool") boolean is_build_feature_available(String name); +// Targeting ../PyObjectTypePtr.java -/** - * Use BUILD_FEATURE_REQUIRED macro in user-code. - * - * In selective build mode becomes a no-op if the build feature passed - * in is available. If not available, throws an exception (c10::Error). 
- * The compiler is able to perform dead code elimination for code - * following this method if the build feature is not available. - * - * In instrumenting mode (tracing mode), registers (as a side effect) - * the presence of this specific build feature being triggered. - */ -// #if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode +// Targeting ../LayoutTypePtr.java -// #if defined(TORCH_BUILD_FEATURE_ALLOWLIST) -// #define BUILD_FEATURE_REQUIRED(NAME) -// if (!c10::impl::is_build_feature_available(NAME)) { -// ::c10::impl::build_feature_required_feature_not_available(NAME); -// } -// #else // Everything trivially selected -// #define BUILD_FEATURE_REQUIRED(NAME) -// #endif +// Targeting ../ScalarTypeTypePtr.java -// #else // trace mode -// #define BUILD_FEATURE_REQUIRED(NAME) -// RECORD_FUNCTION_WITH_SCOPE( -// at::RecordScope::BUILD_FEATURE, -// std::string(NAME), -// {}); -// #endif -// Use this macro, and not is_build_feature_available -// #define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME) +// Targeting ../AnyListTypePtr.java -// returns true iff allowlist contains item -// allowlist_contains("a;bc;d", "bc") == true -// Returns true iff the given op name is on the allowlist -// and should be registered -@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_check(@ByVal @Cast("c10::string_view*") Pointer op_name); +// Targeting ../AnyTupleTypePtr.java -// Returns true iff the given schema string is on the allowlist -// and should be registered -@Namespace("c10::impl") public static native @Cast("const bool") boolean schema_allowlist_check(@ByVal @Cast("c10::string_view*") Pointer schema); -// Returns true iff the given custom class name is on the allowlist -// and should be registered -@Namespace("c10::impl") public static native @Cast("const bool") boolean custom_class_allowlist_check(@ByVal @Cast("c10::string_view*") Pointer custom_class_name); +// Targeting ../AnyClassTypePtr.java -// schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST. -// Add this API to pass arbitrary allowlist. -@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_contains_name_in_schema(@ByVal @Cast("c10::string_view*") Pointer allowlist, @ByVal @Cast("c10::string_view*") Pointer schema); -// Returns true iff the given dispatch key is on the allowlist -// and should be registered. When we turn this on, the list of valid -// mobile dispatch keys is hard coded (but you need to make sure -// that you have the correct set of dispatch keys for this). 
-@Namespace("c10::impl") public static native @Cast("const bool") boolean dispatch_key_allowlist_check(DispatchKey arg0); -@Namespace("c10::impl") public static native @Cast("const bool") boolean dispatch_key_allowlist_check(@Cast("c10::DispatchKey") short arg0); - // namespace impl // namespace c10 -// Parsed from ATen/record_function.h +// Parsed from ATen/core/jit_type_base.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include // #include // #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Kind of record function scope; -@Namespace("at") public enum RecordScope { - // c10/ATen ops, autograd nodes - FUNCTION((byte)(0)), - // Functions/nodes called from the autograd - BACKWARD_FUNCTION((byte)(1)), - // TorchScript functions, methods - TORCHSCRIPT_FUNCTION((byte)(2)), - // Kernel Function dtype Tag - KERNEL_FUNCTION_DTYPE((byte)(3)), - // Torchbind custom class, - CUSTOM_CLASS((byte)(4)), - // Generic Build Feature - BUILD_FEATURE((byte)(5)), - // Kernel Function dtype Tag - LITE_INTERPRETER((byte)(6)), - // User defined scope (e.g. with record_function()) - USER_SCOPE((byte)(7)), - // Scopes for static runtime, a specialized TorchScript interpreter - STATIC_RUNTIME_OP((byte)(8)), - STATIC_RUNTIME_MODEL((byte)(9)), - NUM_SCOPES((byte)(10));// must be the last in the list - - public final byte value; - private RecordScope(byte v) { this.value = v; } - private RecordScope(RecordScope e) { this.value = e.value; } - public RecordScope intern() { for (RecordScope e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - - // namespace at - -// Targeting ../StringView.java - - - -// Soft limit on the number of callbacks to use; -@Namespace("at") @MemberGetter public static native @Cast("const std::size_t") long kSoftLimitCallbacks(); -// Targeting ../ObserverContext.java - - +// #define C10_FORALL_TYPES(_) +// _(AnyType) +// _(EnumType) +// _(AnyEnumType) +// _(TensorType) +// _(StorageType) +// _(TupleType) +// _(ListType) +// _(DictType) +// _(NumberType) +// _(FloatType) +// _(ComplexType) +// _(FutureType) +// _(AwaitType) +// _(RRefType) +// _(IntType) +// _(NoneType) +// _(StringType) +// _(GeneratorType) +// _(QuantizerType) +// _(BoolType) +// _(OptionalType) +// _(VarType) +// _(DeviceObjType) +// _(StreamObjType) +// _(FunctionType) +// _(ClassType) +// _(PyObjectType) +// _(CapsuleType) +// _(InterfaceType) +// _(QSchemeType) +// _(ScalarTypeType) +// _(LayoutType) +// _(MemoryFormatType) +// _(AnyListType) +// _(AnyTupleType) +// _(AnyClassType) +// _(SymIntType) +// _(SymFloatType) +// _(UnionType) +// _(DynamicType) -// -// PyTorch callbacks/observers API: -// +@Namespace("c10") public enum TypeKind { + AnyType(0), + EnumType(1), + AnyEnumType(2), + TensorType(3), + StorageType(4), + TupleType(5), + ListType(6), + DictType(7), + NumberType(8), + FloatType(9), + ComplexType(10), + FutureType(11), + AwaitType(12), + RRefType(13), + IntType(14), + NoneType(15), + StringType(16), + GeneratorType(17), + QuantizerType(18), + BoolType(19), + OptionalType(20), + VarType(21), + DeviceObjType(22), + StreamObjType(23), + FunctionType(24), + ClassType(25), + PyObjectType(26), + CapsuleType(27), + InterfaceType(28), + QSchemeType(29), + ScalarTypeType(30), + LayoutType(31), + MemoryFormatType(32), + AnyListType(33), + AnyTupleType(34), + 
AnyClassType(35), + SymIntType(36), + SymFloatType(37), + UnionType(38), + DynamicType(39); -/** - * RecordFunctionCallback represents a pair of callbacks to be used with - * RecordFunction, members: - * start, end - the callbacks to run when entering and exiting the scope; - * optionally, the start callback may return an ObserverContext which will - * be passed to the end callback, use appropriate constructor accordingly. - * needs_inputs - whether the callbacks need the inputs passed from the - * observed function/range; NOTE: passing the inputs incurs an additional - * overhead; sampling_probability - if not 1.0, then the callback is - * probabilistically sampled to run; NOTE: start and end callbacks always run as - * a pair and are sampled together; scopes - types of scopes to execute the - * callbacks on (see RecordScope); passing empty set means the callbacks will be - * executed for all possible scope types should_run - optional function that - * returns whether this callback should run; overwrites the effect of setting - * sampling_probability - */ + public final int value; + private TypeKind(int v) { this.value = v; } + private TypeKind(TypeKind e) { this.value = e.value; } + public TypeKind intern() { for (TypeKind e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} -// Notes: -// - two types of callbacks are provided: thread local and global -// - thread local callbacks are added/removed only for the given thread -// and are stored locally for each thread and separately from the list -// of the global callbacks -// - global callbacks are stored in a single per process list and are -// invoked by every RecordFunction, in addition to the thread local -// callbacks specific to the given thread -// - we allow the added callbacks to be sampled, by specifying a sampling -// probability for each callback pair, if the start callback is -// not picked to run, the corresponding end callback won't be called -// - a typical use case for the global callbacks is passive monitoring -// in the background (e.g. fleet-wide monitoring), without focusing on -// the specific piece of code -// - in contrast, thread local callbacks are enabled locally, on demand, -// for the specific piece of code (range) and are not sampled -// - a typical use case for thread local callbacks is profiler and code -// execution tracer -// - note, thread local callbacks are automatically propagated with -// ThreadLocalState across JIT continuations and async tasks (at::launch) +@Namespace("c10") public static native @Cast("const char*") BytePointer typeKindToString(TypeKind kind); +@Namespace("c10") public static native String typeKindToString(@Cast("c10::TypeKind") int kind); -@Namespace("at") @MemberGetter public static native @Cast("const at::CallbackHandle") long INVALID_CALLBACK_HANDLE(); -// Targeting ../RecordFunctionCallbacksEntry.java +// Use this to customize how a Type is printed using `annotation_str()`. If +// c10::nullopt is returned, `annotation_str()` falls through to its default +// implementation. 
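The `TypeKind` enum and the `typeKindToString()` overloads just above can be exercised directly. A small sketch, assuming the enum is generated as a nested type of `org.bytedeco.pytorch.global.torch` as shown:

import org.bytedeco.pytorch.global.torch;
import org.bytedeco.pytorch.global.torch.TypeKind;

public class TypeKindExample {
    public static void main(String[] args) {
        // The String-returning overload takes the raw c10::TypeKind value,
        // i.e. the int stored in each enum constant's 'value' field.
        for (TypeKind k : new TypeKind[] {TypeKind.TensorType, TypeKind.IntType, TypeKind.UnionType}) {
            System.out.println(k.value + " -> " + torch.typeKindToString(k.value));
        }
    }
}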
+ // namespace detail +// #define TORCH_DECLARE_SINGLETON(Type) +// struct Type; +// namespace detail { +// template <> struct IsSingletonType : public std::integral_constant {}; +// } + + + + + + + + + + + + + + + + + + + + + + + +// Targeting ../Type.java -// Holds pairs (callbacks, unique_id) -// Targeting ../RecordFunction.java +// Explicitly enable MaybeOwned>, rather than allowing +// MaybeOwned to be used for any type right away. +// Targeting ../SharedType.java -@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(RecordScope scope); -@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(@Cast("at::RecordScope") byte scope); -@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( - RecordScope scope); -@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( - @Cast("at::RecordScope") byte scope); - // namespace detail -// optional argument - function's seq_no -// #define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// ::at::detail::record_function_with_scope( -// guard, fn, inputs, ##__VA_ARGS__); -// } -// #define RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( -// scope, fn, inputs, outputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// if (guard.needsInputs()) { -// guard.before(fn, inputs, ##__VA_ARGS__); -// } else { -// guard.before(fn, ##__VA_ARGS__); -// } -// if (guard.needsOutputs()) { -// guard.setOutputs(outputs); -// } -// } +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Type lhs, @Const @ByRef Type rhs); +// Targeting ../NamedType.java -// #define RECORD_FUNCTION(fn, inputs, ...) -// RECORD_FUNCTION_WITH_SCOPE( -// at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__) -// #define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) -// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs) -// #define RECORD_FUNCTION_WITH_INPUTS_OUTPUTS(fn, inputs, outputs, ...) -// RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( -// at::RecordScope::FUNCTION, fn, inputs, outputs, ##__VA_ARGS__) + // namespace c10 + // namespace std -// Custom user scopes in C++; similar to Python's 'with record_function("..."):' -// #define RECORD_USER_SCOPE(fn) -// RECORD_FUNCTION_WITH_SCOPE( -// at::RecordScope::USER_SCOPE, fn, c10::ArrayRef{}) -// RECORD_USER_SCOPE with inputs -// #define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) -// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs) +// Parsed from ATen/core/DimVector.h -// Helper macro to pass in debug handle that is used to -// post process events -// #define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( -// scope, fn, debug_handle, inputs, ...) -// at::RecordFunction guard(scope); -// if (guard.isActive()) { -// ::at::detail::record_function_with_scope_and_debug_handle( -// guard, fn, debug_handle, inputs, ##__VA_ARGS__); -// } +// #pragma once +// #include -// Helper macros to record LITE INTERPETER scope events with debug handles -// #define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( -// fn, debug_handle, inputs) -// RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( -// at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs) +// Re-declaring 'DimVector' type and size inside 'at' namespace. +// This is done to avoid modifying every use into their 'c10' +// equivalent. 
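The `operator ==` binding for `Type` a few lines up compares two types structurally. A hedged sketch, assuming the C++ singleton accessors such as `TensorType::get()` are mapped to static Java methods (as JavaCPP normally does for static member functions):

import org.bytedeco.pytorch.TensorType;
import org.bytedeco.pytorch.global.torch;

public class TypeEqualityExample {
    public static void main(String[] args) {
        TensorType a = TensorType.get();
        TensorType b = TensorType.get();
        // operator==(const Type&, const Type&) is exposed as torch.equals(...)
        System.out.println(torch.equals(a, b)); // true: same singleton type
    }
}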
-// Bookend to the RECORD_FUNCTION macros. Use this after the kernel -// launch to let the profiler bind the outputs to the op that produced -// them. Note that guard is declared by RECORD_FUNCTION so this macro -// needs to be called from the same scope as RECORD_FUNCTION -// #define RECORD_OUTPUTS(outputs) -// if (guard.needsOutputs()) { -// guard.setOutputs( -// std::vector(outputs.begin(), outputs.end())); -// } + // namespace at -/** - * addThreadLocalCallback adds a thread local callback to run with - * RecordFunction, returns handle to use with removeThreadLocalCallback - */ -@Namespace("at") public static native @Cast("at::CallbackHandle") long addThreadLocalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); -/** - * hasThreadLocalCallbacks returns whether there're callbacks registered - * with addThreadLocalCallback - */ -@Namespace("at") public static native @Cast("bool") boolean hasThreadLocalCallbacks(); +// Parsed from ATen/core/blob.h -/** - * clearThreadLocalCallbacks removes all thread local callbacks - */ -@Namespace("at") public static native void clearThreadLocalCallbacks(); +// #pragma once -/** - * addGlobalCallback adds a global callback to run with RecordFunction: - * - * only during the program initialization - */ -@Namespace("at") public static native @Cast("at::CallbackHandle") long addGlobalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); +// #include +// #include +// #include +// #include +// #include -/** - * removeCallback removes a callback given the handle returned by - * addThreadLocalCallback or addGlobalCallback; - * - * no other code can run simultaneously - */ -@Namespace("at") public static native void removeCallback(@Cast("at::CallbackHandle") long handle); +// #include +// #include +// #include +// Targeting ../Blob.java -/** - * Prevent the given callback from executing. If handle is invalid, - * does nothing. - */ -@Namespace("at") public static native void disableCallback(@Cast("at::CallbackHandle") long handle); -/** - * Allow the given callback, previously disabled with disableCallback, to - * execute again. If handle is invalid, does nothing. 
- */ -@Namespace("at") public static native void reenableCallback(@Cast("at::CallbackHandle") long handle); -/** - * hasGlobalCallbacks returns whether there're global callbacks - * registered with pushGlobalCallback - */ -@Namespace("at") public static native @Cast("bool") boolean hasGlobalCallbacks(); +@Namespace("caffe2") public static native void swap(@ByRef Blob lhs, @ByRef Blob rhs); -/** - * clearGlobalCallbacks removes all global callbacks - */ -@Namespace("at") public static native void clearGlobalCallbacks(); +@Namespace("caffe2") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Blob v); -// for both thread local and global callbacks -@Namespace("at") public static native @Cast("bool") boolean hasCallbacks(); -@Namespace("at") public static native void clearCallbacks(); + // namespace caffe2 -/** - * enableRecordFunction enables RecordFunction thread locally - */ -@Namespace("at") public static native void enableRecordFunction(@Cast("bool") boolean enable/*=true*/); -@Namespace("at") public static native void enableRecordFunction(); -/** - * isRecordFunctionEnabled returns whether RecordFunction - * is enabled thread locally - */ -@Namespace("at") public static native @Cast("bool") boolean isRecordFunctionEnabled(); -// Targeting ../RecordFunctionGuard.java +// Parsed from ATen/core/custom_class.h +// #pragma once -// Targeting ../DisableRecordFunctionGuard.java +// #include +// #include +// #include +// #include +// #include -// Targeting ../RecordFunctionTLS.java +@Namespace("c10") public static native @SharedPtr("c10::ClassType") @ByVal ClassType getCustomClassTypeImpl(@ByRef @Cast("std::type_index*") Pointer tindex); -@Namespace("at") public static native @Const @ByRef RecordFunctionTLS get_record_function_tls_(); -@Namespace("at") public static native void set_record_function_tls_(@Const @ByRef RecordFunctionTLS tls); +// Parsed from ATen/core/dynamic_type.h -@Namespace("at") public static native void set_record_function_seed_for_testing(@Cast("uint32_t") int seed); +// #pragma once - // namespace at +// #include +// #include +// #include +// #include +// #define DYNAMIC_TYPE_BIT(x) (1 << x) + +@Namespace("c10") @MemberGetter public static native @Cast("const c10::DynamicTypeBits") int kDynamicCovariantTypeBit(); +@Namespace("c10") @MemberGetter public static native @Cast("const c10::DynamicTypeBits") int kDynamicAnyTypeBit(); + +@Namespace("c10") @MemberGetter public static native @Cast("const c10::DynamicTypeBits") int kDynamicNoneTypeBit(); +@Namespace("c10") @MemberGetter public static native @Cast("const c10::DynamicTypeBits") int kDynamicIntTypeBit(); +@Namespace("c10") @MemberGetter public static native @Cast("const c10::DynamicTypeBits") int kDynamicFloatTypeBit(); +@Namespace("c10") @MemberGetter public static native @Cast("const c10::DynamicTypeBits") int kDynamicComplexTypeBit(); +@Namespace("c10") @MemberGetter public static native @Cast("const c10::DynamicTypeBits") int kDynamicListTypeBit(); +@Namespace("c10") @MemberGetter public static native @Cast("const c10::DynamicTypeBits") int kDynamicTupleTypeBit(); +@Namespace("c10") @MemberGetter public static native @Cast("const c10::DynamicTypeBits") int kDynamicClassTypeBit(); + +// #define FORALL_DYNAMIC_TYPES(_) +// _(Tensor, DYNAMIC_TYPE_BIT(0), 1) +// _(None, kDynamicNoneTypeBit, 1) +// _(Bool, DYNAMIC_TYPE_BIT(2), 1) +// _(Int, kDynamicIntTypeBit, 1) +// _(Float, kDynamicFloatTypeBit, 1) +// _(Complex, 
kDynamicComplexTypeBit, 1) +// _(Number, +// (kDynamicIntTypeBit | kDynamicFloatTypeBit | kDynamicComplexTypeBit), +// 1) +// _(String, DYNAMIC_TYPE_BIT(6), 1) +// _(List, kDynamicListTypeBit, 0) +// _(Tuple, (kDynamicTupleTypeBit | kDynamicCovariantTypeBit), 0) +// _(Dict, DYNAMIC_TYPE_BIT(9), 0) +// _(Class, kDynamicClassTypeBit, 0) +// _(Optional, +// (DYNAMIC_TYPE_BIT(11) | kDynamicNoneTypeBit | kDynamicCovariantTypeBit), +// 0) +// _(AnyList, (kDynamicListTypeBit | kDynamicAnyTypeBit), 1) +// _(AnyTuple, +// (kDynamicTupleTypeBit | kDynamicCovariantTypeBit | kDynamicAnyTypeBit), +// 1) +// _(DeviceObj, DYNAMIC_TYPE_BIT(12), 1) +// _(StreamObj, DYNAMIC_TYPE_BIT(13), 1) +// _(Capsule, DYNAMIC_TYPE_BIT(14), 1) +// _(Generator, DYNAMIC_TYPE_BIT(15), 1) +// _(Storage, DYNAMIC_TYPE_BIT(16), 1) +// _(Var, DYNAMIC_TYPE_BIT(17), 0) +// _(AnyClass, (kDynamicClassTypeBit | kDynamicAnyTypeBit), 1) +// _(QScheme, DYNAMIC_TYPE_BIT(18), 1) +// _(Quantizer, DYNAMIC_TYPE_BIT(19), 1) +// _(AnyEnum, DYNAMIC_TYPE_BIT(20), 1) +// _(RRef, DYNAMIC_TYPE_BIT(21), 0) +// _(Future, DYNAMIC_TYPE_BIT(22), 0) +// _(Await, DYNAMIC_TYPE_BIT(23), 0) +// _(Any, 0xffffffff, 1) + +// #define FORALL_DYNAMIC_TYPES_FAKE(_) +// _(ScalarType, kDynamicIntTypeBit, 1) +// _(Layout, kDynamicIntTypeBit, 1) +// _(SymInt, kDynamicIntTypeBit, 1) +// _(MemoryFormat, kDynamicIntTypeBit, 1) + +// #define FORWARD_DECL_TYPE(NAME, _, __) struct NAME ## Type; +// #undef FORWARD_DECL_TYPE -// Parsed from ATen/ThreadLocalState.h +/** + * DynamicType is designed as a low dependency type system for TorchScript. The + * existing JIT types are used for both compilation and runtime, which makes + * sense for server contexts because we often compile and run the model in + * the same process, however this doesn't hold for mobile devices where we + * always compiles a model ahead of time, therefore there will be dependencies + * which are not needed, but built with mobile runtime causing binary size + * bloat, by design. Every basic type like Int, Bool or String will bring their + * vtable, typeinfo, constructor, destructor and even more data from their + * specializations for STL types to the binary causing a long tail bloat. + * + * The core problem is about the complexity to implement and maintain a single + * type system for both analysis and execution purposes. Although they should + * have the exactly same semantics, in practice implement a unified abstraction + * adds conceptual and representational overhead for both sides of the world. + * + * To address the issues, DynamicType implements a minimal subset of JIT types + * and uses a generic algorithm to test all subtyping relations. To achieve + * this, we assign each dynamic type a single integer tag to represent its + * semantics. More specifically, a dynamic type is defined as a set of "control + * bits" and "data bits", where control bits describe the special behavior when + * testing a type and data bits map to identity of each nominal type. We use bit + * operations to perform all the tests. + * + * For example, a "covariant bit" is a control bit used to describe if a type + * is covariant, right now the most used one is tuple type, and in addition to + * the control bit, tuple type's data bit is the 8th bit from the LSB. Control + * bits start from MSB and data bits start from LSB. + * + * If two types are equal, then they are subtype of each other, also if the bits + * from one type tag is subset of the other tag, it automatically becomes a + * subtype of the other. 
This simplifies the subtyping logic a lot, and over the + * long term it is possible to adopt this scheme on the server side as well. + * Special cases can be added but they generally should not take too much code + * size. + * + * DynamicType may or may not inherit from c10::Type because it's not the core + * requirement of DynamicType to interface with existing JIT types, but we might + * want to inherit from c10::Type to reduce the migration cost. + */ -// #pragma once -// #include -// #include -// #include -// #include -// #include +// #define DYNAMIC_TYPE_TAG_VALUE(NAME, _, IS_BASE_TYPE) +// template <> +// struct TORCH_API DynamicTypeTrait { +// C10_ERASE static auto tagValue() { +// return DynamicType::Tag::NAME; +// } +// static constexpr bool isBaseType = IS_BASE_TYPE; +// template +// static std::enable_if_t getBaseType() { +// static auto type = detail::makeBaseType(tagValue()); +// return type; +// } +// }; // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 // namespace c10 +// #undef DYNAMIC_TYPE_TAG_VALUE -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../ThreadLocalState.java + // namespace c10 -// Targeting ../ThreadLocalStateGuard.java +// Parsed from ATen/core/type_factory.h +// #pragma once +// #include +// #include - // namespace at +// #include +// #include +// #include +// Helper functions for constructing DynamicTypes inline. -// Parsed from ATen/ATen.h + // namespace c10 + + +// Parsed from c10/util/order_preserving_flat_hash_map.h + +// Taken from +// https://github.com/skarupke/flat_hash_map/blob/2c4687431f978f02a3780e24b8b701d22aa32d9c/flat_hash_map.hpp +// with fixes applied: +// - https://github.com/skarupke/flat_hash_map/pull/25 +// - https://github.com/skarupke/flat_hash_map/pull/26 +// - replace size_t with uint64_t to fix it for 32bit +// - add "GCC diagnostic" pragma to ignore -Wshadow +// - make sherwood_v3_table::convertible_to_iterator public because GCC5 seems +// to have issues with it otherwise +// - fix compiler warnings in operator templated_iterator + +// Copyright Malte Skarupke 2017. +// Distributed under the Boost Software License, Version 1.0. +// (See http://www.boost.org/LICENSE_1_0.txt) + +// Modified to maintain insertion and deletion order through a doubly-linked +// list // #pragma once -// #if !defined(_MSC_VER) && __cplusplus < 201402L -// #error C++14 or later compatible compiler is required to use ATen. 
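Returning to the DynamicType tag scheme described above: the `kDynamic*TypeBit` member getters make the subset-based subtyping test easy to reproduce. A sketch, assuming the getters land in `org.bytedeco.pytorch.global.torch`:

import org.bytedeco.pytorch.global.torch;

public class DynamicTypeBitsExample {
    public static void main(String[] args) {
        // Number's data bits are Int | Float | Complex, per FORALL_DYNAMIC_TYPES above.
        int intBits = torch.kDynamicIntTypeBit();
        int numberBits = intBits
                | torch.kDynamicFloatTypeBit()
                | torch.kDynamicComplexTypeBit();

        // A tag is a subtype when its bits are a subset of the other tag's bits.
        boolean intSubtypeOfNumber = (intBits & numberBits) == intBits;
        System.out.println("Int <: Number? " + intSubtypeOfNumber); // true
    }
}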
+// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") // #endif -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #ifdef _MSC_VER +// #define SKA_NOINLINE(...) __declspec(noinline) __VA_ARGS__ +// #else +// #define SKA_NOINLINE(...) __VA_ARGS__ __attribute__((noinline)) +// #endif -// TODO: try to remove this -// There is some back story, see https://github.com/pytorch/pytorch/issues/48684 -// #include -// Parsed from ATen/Config.h +// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t +// (it takes CWG1558 into account and also works for older compilers) -// #pragma once +// Targeting ../prime_number_hash_policy.java -// Test these using #if AT_MKL_ENABLED(), not #ifdef, so that it's -// obvious if you forgot to include Config.h -// c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined -// -// DO NOT put the macros for CUDA libraries in this file; they belong in cuda/CUDAConfig.h -// #define AT_MKLDNN_ENABLED() 0 -// #define AT_MKL_ENABLED() 1 -// #define AT_MKL_SEQUENTIAL() 0 -// #define AT_FFTW_ENABLED() 0 -// #define AT_POCKETFFT_ENABLED() 0 -// #define AT_NNPACK_ENABLED() 1 -// #define CAFFE2_STATIC_LINK_CUDA() 0 -// #define AT_BUILD_WITH_BLAS() 1 -// #define AT_BUILD_WITH_LAPACK() 1 -public static final int AT_PARALLEL_OPENMP = 1; -public static final int AT_PARALLEL_NATIVE = 0; -public static final int AT_PARALLEL_NATIVE_TBB = 0; -// #define AT_BLAS_F2C() 0 -// #define AT_BLAS_USE_CBLAS_DOT() 0 +// Targeting ../power_of_two_hash_policy.java -// Parsed from ATen/Device.h +// Targeting ../fibonacci_hash_policy.java -// #pragma once -// #include -// Parsed from ATen/DeviceGuard.h + // namespace ska_ordered + + + +// Parsed from ATen/core/Dict_inl.h // #pragma once -// #include -// #include -// #include -// #include // TensorList whyyyyy +// #include +// #include -// Are you here because you're wondering why DeviceGuard(tensor) no -// longer works? For code organization reasons, we have temporarily(?) -// removed this constructor from DeviceGuard. The new way to -// spell it is: -// -// OptionalDeviceGuard guard(device_of(tensor)); -/** Return the Device of a Tensor, if the Tensor is defined. */ -@Namespace("at") public static native @ByVal DeviceOptional device_of(@Const @ByRef Tensor t); +@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef Type type); -@Namespace("at") public static native @ByVal DeviceOptional device_of(@Const @ByRef TensorOptional t); -/** Return the Device of a TensorList, if the list is non-empty and - * the first Tensor is defined. (This function implicitly assumes - * that all tensors in the list have the same device.) 
*/
-@Namespace("at") public static native @ByVal DeviceOptional device_of(@ByVal TensorArrayRef t);
- // namespace at

-// Parsed from ATen/DimVector.h

-// #pragma once

-// #include

-// Parsed from ATen/Dispatch.h

-// #pragma once

-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #ifdef __CUDACC__
-// #include // For CUDA_VERSION
-// #endif
-// #ifdef TEMPLATE_SELECTIVE_BUILD
-// #include
-// #else
-/**
- * The method should_include_kernel_dtype() returns true/false
- * based on whether the switching code for a specific dtype should be
- * included based on build time constants generated from tracing model
- * execution. This method will be implemented via code-generation and
- * included in this file when code-gen is ready.
- */
-@Namespace("at") public static native @Cast("const bool") boolean should_include_kernel_dtype(
-    @Cast("const char*") BytePointer arg0,
-    ScalarType arg1
-);
-@Namespace("at") public static native @Cast("const bool") boolean should_include_kernel_dtype(
-    String arg0,
-    ScalarType arg1
-);
- // namespace at
-// #endif

-/**
- * In the Facebook internal build (using BUCK), this macro is enabled by
- * passing in -c pt.enable_record_kernel_dtype=1 when building the tracer
- * binary.
- */
-// #if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE
-// #else
-// #define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type)
-// #endif

-// Avoid if_constexpr if possible, as it's more expensive to compile
-// #if defined __cpp_if_constexpr
-// #define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type)
-// do {
-// if constexpr (!at::should_include_kernel_dtype(
-// at_dispatch_name, enum_type)) {
-// AT_ERROR(
-// "dtype '",
-// toString(enum_type),
-// "' not selected for kernel tag ",
-// at_dispatch_name);
-// }
-// } while (0)
-// #else // defined __cpp_if_constexpr
-// #define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type)
-// at::guts::if_constexpr([&] {
-// AT_ERROR(
-// "dtype '",
-// toString(enum_type),
-// "' not selected for kernel tag ",
-// at_dispatch_name);
-// })
-// #endif

-// #define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...)
-// case enum_type: {
-// AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type);
-// using HINT C10_UNUSED = c10::impl::ScalarTypeToCPPTypeT;
-// return __VA_ARGS__();
-// }

-// #define AT_DISPATCH_CASE(enum_type, ...)
-// AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)

-// #define AT_DISPATCH_CASE_QINT(enum_type, scalar_type, ...)
-// case enum_type: {
-// AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type);
-// using scalar_t = scalar_type;
-// using underlying_t C10_UNUSED = typename scalar_t::underlying;
-// const auto& SCALAR_TYPE C10_UNUSED = enum_type;
-// const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type);
-// return __VA_ARGS__();
-// }

-// #define AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE(
-// enum_type, scalar_type, bitwidth, qmin, qmax, ...)
-// case enum_type: { -// AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); -// using scalar_t = scalar_type; -// using underlying_t C10_UNUSED = typename scalar_t::underlying; -// const auto& SCALAR_TYPE C10_UNUSED = enum_type; -// const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); -// C10_UNUSED int bit_width = bitwidth; -// C10_UNUSED int64_t quant_min = qmin; -// C10_UNUSED int64_t quant_max = qmax; -// return __VA_ARGS__(); -// } -@Namespace("detail") public static native ScalarType scalar_type(ScalarType s); -@Namespace("detail") public static native @Deprecated ScalarType scalar_type(@Const @ByRef DeprecatedTypeProperties t); -@Namespace("detail") public static native @Deprecated void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF(); -@Namespace("detail") public static native @Deprecated void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX(); - // namespace detail -// The AT_DISPATCH_* family of macros provides the ability to -// conveniently generate specializations of a kernel over all of the -// dtypes we care about in PyTorch. We call it "dispatch" because -// we are "dispatching" to the correct, dtype-specific kernel. -// -// A standard usage looks like: -// -// AT_DISPATCH_ALL_TYPES(self.scalar_type(), "op_name", [&] { -// // Your code here, with 'scalar_t' now defined to -// // be the dtype in question -// }); -// -// There are many variations of this macro, so it's important to -// understand exactly /which/ dtypes you want to get instantiated, as -// well as what the "default" set is. -// -// The default set of dtypes that are instantiated (e.g., by -// AT_DISPATCH_ALL_TYPES) are floating point types (float, double), -// and integral types (int32_t, int64_t, int16_t, int8_t, uint8_t), -// but NOT booleans (bool), half-precision floats (Half) or -// complex number (c10::complex, c10::complex). -// This "cut" is somewhat historical (the default types are the -// ones that TH historically supported), but it also reflects the -// fact that the non-default types are "poorly" behaved (booleans -// are NOT integers mod 2, half precision operations ~essentially -// don't exist on CPU, complex numbers are an experimental application). -// -// Here are the questions you should generally ask to decide which -// dispatch you want: -// -// 1. Is this an integral or floating point specific operation? -// (If so, you'll want one of the FLOATING or INTEGRAL macros.) -// -// 2. Should half be supported? (If you're on CPU, the answer is almost -// definitely no. If you do want support, use one of the AND_HALF -// macros) -// -// Much rarer situations: -// -// 3. Should bool be supported? (You often have to write your kernel -// differently if arithmetic operations are involved.) If so, -// Use AT_DISPATCH_ALL_TYPES_AND along with ScalarType::Bool -// -// 4. Should complex be supported? The answer is almost always no, -// unless you are working on "generic" code that should work on -// all dtypes. -// -// Parameters: -// ----------- -// -// 1. The NAME argument is a "tag" that is used to trace and then -// conditionally compile fragments of the case statements such -// that the kernel functions are specialized only for the dtypes -// that are needed. The NAME parameter *must* be a build time -// const char* (can't be std::string, etc...) -// -// Please ensure that the NAME is unique for every implementation -// or you run the risk of over-including code for the kernel -// functions. 
There is no risk of missing out on any code, so -// it's mostly a risk of a Type-2 error, and not a Type-1 error. -// -// Switch-like syntax: -// ------------------- -// There is also a switch-case like syntax which is useful if a kernel -// needs to be specialized for particular scalar types -// -// AT_DISPATCH_SWITCH(self.scalar_type(), "op_name", -// AT_DISPATCH_CASE_INTEGRAL_TYPES([&] { -// op_integral(iter); -// }) -// AT_DISPATCH_CASE_FLOATING_TYPES([&] { -// op_floating(iter); -// }) -// AT_DISPATCH_CASE(kBool, [&] { -// op_bool(iter); -// }) -// ); -// -// For each AT_DISPATCH_FOO macro, there is a corresponding -// AT_DISPATCH_CASE_FOO macro which can be used inside of an -// AT_DISPATCH_SWITCH block. -// NB: the the_type variable is not used, but we have kept it for -// backwards compatibility. It's probably not used by anyone though; -// but we're just being safe (and it doesn't hurt.) Note we must -// use it to shut up warnings about unused store. -// #define AT_DISPATCH_SWITCH(TYPE, NAME, ...) -// [&] { -// const auto& the_type = TYPE; -// constexpr const char* at_dispatch_name = NAME; -// /* don't use TYPE again in case it is an expensive or side-effect op */ -// at::ScalarType _st = ::detail::scalar_type(the_type); -// RECORD_KERNEL_FUNCTION_DTYPE(at_dispatch_name, _st); -// switch (_st) { -// __VA_ARGS__ -// default: -// AT_ERROR( -// '"', -// at_dispatch_name, -// "\" not implemented for '", -// toString(_st), -// "'"); -// } -// }() -// #define AT_DISPATCH_CASE_FLOATING_TYPES(...) -// AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) -// AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) -// #define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)) -// #define AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(...) -// AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) -// AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) -// AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) -// #define AT_DISPATCH_FLOATING_TYPES_AND_HALF(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(__VA_ARGS__)) -// #define AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, ...) -// AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) -// #define AT_DISPATCH_FLOATING_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_FLOATING_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) -// AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) -// #define AT_DISPATCH_FLOATING_TYPES_AND2( -// SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_FLOATING_TYPES_AND2( -// SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_COMPLEX_TYPES(...) -// AT_DISPATCH_CASE(at::ScalarType::ComplexDouble, __VA_ARGS__) -// AT_DISPATCH_CASE(at::ScalarType::ComplexFloat, __VA_ARGS__) -// #define AT_DISPATCH_COMPLEX_TYPES(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)) -// #define AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, ...) -// AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) -// #define AT_DISPATCH_COMPLEX_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) 
-// AT_DISPATCH_SWITCH( -// TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(...) -// AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) -// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, NAME, AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__)) -// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1(SCALARTYPE, ...) -// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) -// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1( -// SCALARTYPE, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1( -// SCALARTYPE, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( -// SCALARTYPE1, SCALARTYPE2, ...) -// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) -// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( -// SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( -// SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) -// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) -// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_INTEGRAL_TYPES(...) -// AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) -// AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) -// AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) -// AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) -// AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) -// #define AT_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__)) -// #define AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, ...) -// AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) -// #define AT_DISPATCH_INTEGRAL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_ALL_TYPES(...) -// AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) -// #define AT_DISPATCH_ALL_TYPES(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__)) -// #define AT_DISPATCH_CASE_QINT_TYPES(...) -// AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) -// AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) -// AT_DISPATCH_CASE_QINT(at::kQInt32, at::qint32, __VA_ARGS__) +// Parsed from ATen/core/Dict.h -// #define AT_DISPATCH_QINT_TYPES(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__)) +// #pragma once -// #define AT_DISPATCH_CASE_QINT_BYTE_TYPES(...) 
-// AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) -// AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// #define AT_DISPATCH_QINT_BYTE_TYPES(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_BYTE_TYPES(__VA_ARGS__)) -// #define AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(...) -// AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( -// at::kQInt8, at::qint8, CHAR_BIT, SCHAR_MIN, SCHAR_MAX, __VA_ARGS__) -// AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( -// at::kQUInt8, at::quint8, CHAR_BIT, 0, UCHAR_MAX, __VA_ARGS__) -// AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( -// at::kQInt32, -// at::qint32, -// CHAR_BIT * sizeof(int), -// INT_MIN, -// INT_MAX, -// __VA_ARGS__) -// AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( -// at::kQUInt4x2, at::quint4x2, 4, 0, 15, __VA_ARGS__) -// AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( -// at::kQUInt2x4, at::quint2x4, 2, 0, 3, __VA_ARGS__) -// #define AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, NAME, AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(__VA_ARGS__)) +// Targeting ../GenericDictEntryRef.java -// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(...) -// AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) -// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__)) +// Targeting ../GenericDictIterator.java -// #define AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, ...) -// AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) -// #define AT_DISPATCH_ALL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, ...) -// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) +// Targeting ../GenericDict.java -// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) -// AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// Targeting ../StringGenericListDict.java -// #define AT_DISPATCH_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( -// SCALARTYPE1, SCALARTYPE2, ...) -// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// GenericDict is how IValue stores dicts. It is, however, not part of the +// public API. Kernels should use Dicts with concrete Key, Value types instead +// (maybe except for some internal prim ops). -// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( -// SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( -// SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_ALL_TYPES_AND3( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) 
-// AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) -// #define AT_DISPATCH_ALL_TYPES_AND3( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_ALL_TYPES_AND3( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) -// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) -// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) -// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) +// #include // IWYU pragma: keep -// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) -// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) -// AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) -// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( -// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__)) +// Parsed from ATen/core/functional.h -// #define AT_DISPATCH_INDEX_TYPES(TYPE, NAME, ...) -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_PRIVATE_CASE_TYPE_USING_HINT( -// at::ScalarType::Int, index_t, __VA_ARGS__) -// AT_PRIVATE_CASE_TYPE_USING_HINT( -// at::ScalarType::Long, index_t, __VA_ARGS__)) +// #pragma once -// ---------------------------------------------------------------------------- -// DEPRECATED MACROS, DON'T USE THESE -// ---------------------------------------------------------------------------- +// #include +// #include -// #define AT_DISPATCH_ALL_TYPES_AND_HALF(TYPE, NAME, ...) -// detail::deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF(); -// AT_DISPATCH_SWITCH( -// TYPE, -// NAME, -// AT_DISPATCH_CASE_ALL_TYPES_AND(at::ScalarType::Half, __VA_ARGS__)) +// The passed in function must take T by value (T), or by +// const reference (const T&); taking T by non-const reference +// will result in an error like: +// +// error: no type named 'type' in 'class std::result_of' +// +// No explicit template parameters are required. +// Overload for explicit function and ArrayRef + +// C++ forbids taking an address of a constructor, so here's a workaround... 
+// Overload for constructor (R) application + + // namespace c10 -// Parsed from ATen/EmptyTensor.h + +// Parsed from ATen/core/jit_type.h // #pragma once -// #include -@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @Cast("size_t") long itemsize, - @Cast("size_t") long storage_offset/*=0*/); -@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @Cast("size_t") long itemsize); -@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @Cast("size_t") long itemsize, - @Cast("size_t") long storage_offset/*=0*/); -@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @Cast("size_t") long itemsize); -@Namespace("at::detail") public static native @ByVal SymInt computeStorageNbytesContiguous( - @ByVal SymIntRef sizes, - @Const @ByRef SymInt itemsize, - @Const @ByRef(nullValue = "c10::SymInt(0)") SymInt storage_offset); -@Namespace("at::detail") public static native @ByVal SymInt computeStorageNbytesContiguous( - @ByVal SymIntRef sizes, - @Const @ByRef SymInt itemsize); -@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytes( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, - @Cast("size_t") long itemsize, - @Cast("size_t") long storage_offset/*=0*/); -@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytes( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, - @Cast("size_t") long itemsize); -@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytes( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Cast("size_t") long itemsize, - @Cast("size_t") long storage_offset/*=0*/); -@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytes( +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include +// #include +// #include + // namespace jit + // namespace torch + + + + +@Namespace("c10") public static native @Cast("bool") boolean is_contiguous_strides( + @Const @ByVal LongArrayRef sizes, + @Const @ByVal LongArrayRef strides); +@Namespace("c10") public static native @Cast("bool") boolean is_contiguous_strides( @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Cast("size_t") long itemsize); -@Namespace("at::detail") public static native @ByVal SymInt computeStorageNbytes( - @ByVal SymIntRef sizes, - @ByVal SymIntRef strides, - @Const @ByRef SymInt itemsize, - @Const @ByRef(nullValue = "c10::SymInt(0)") SymInt storage_offset); -@Namespace("at::detail") public static native @ByVal SymInt computeStorageNbytes( - @ByVal SymIntRef sizes, - @ByVal SymIntRef strides, - @Const @ByRef SymInt itemsize); + @ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector long... strides); +// Targeting ../AnyType.java -@Namespace("at::detail") public static native @ByVal TensorBase empty_generic( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - Allocator allocator, - @ByVal DispatchKeySet ks, - ScalarType scalar_type, - @ByVal MemoryFormatOptional memory_format_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_generic( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - Allocator allocator, - @ByVal DispatchKeySet ks, - ScalarType scalar_type, - @ByVal MemoryFormatOptional memory_format_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_generic( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, - Allocator allocator, - @ByVal DispatchKeySet ks, - ScalarType scalar_type); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_generic( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - Allocator allocator, - @ByVal DispatchKeySet ks, - ScalarType scalar_type); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_symint_generic( - @ByVal SymIntRef size, - @ByVal SymIntRef stride, - Allocator allocator, - @ByVal DispatchKeySet ks, - ScalarType scalar_type); +// Shim for compatibility with code that uses TypePtr. +@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef Type.TypePtr typePtr); -@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - ScalarType dtype, - @Cast("bool") boolean pin_memory/*=false*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - ScalarType dtype); -@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - ScalarType dtype, - @Cast("bool") boolean pin_memory/*=false*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - ScalarType dtype); +@Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef Type lhs, @Const @ByRef Type rhs); +// Targeting ../AwaitSingleElementType.java -@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - @ByVal ScalarTypeOptional dtype_opt, - @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt, - @ByVal MemoryFormatOptional memory_format_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal ScalarTypeOptional dtype_opt, - @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt, - @ByVal MemoryFormatOptional memory_format_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu(@ByVal 
@Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef TensorOptions options); -@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef TensorOptions options); +// Targeting ../ListSingleElementType.java -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, - ScalarType dtype, - @Cast("bool") boolean pin_memory/*=false*/); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, - ScalarType dtype); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - ScalarType dtype, - @Cast("bool") boolean pin_memory/*=false*/); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - ScalarType dtype); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, - @ByVal ScalarTypeOptional dtype_opt, - @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @ByVal ScalarTypeOptional dtype_opt, - @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt); +// Targeting ../RRefSingleElementType.java -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, - @Const @ByRef TensorOptions options); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @Const @ByRef TensorOptions options); -@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - ScalarType dtype, - @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - ScalarType dtype); -@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - ScalarType dtype, - @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - 
ScalarType dtype); +// Targeting ../FutureSingleElementType.java -@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - @ByVal ScalarTypeOptional dtype_opt, - @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt, - @ByVal MemoryFormatOptional memory_format_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal ScalarTypeOptional dtype_opt, - @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt, - @ByVal MemoryFormatOptional memory_format_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_symint_meta( - @ByVal SymIntRef size, - @ByVal ScalarTypeOptional dtype_opt, - @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt, - @ByVal MemoryFormatOptional memory_format_opt); +// Targeting ../OptionalSingleElementType.java -@Namespace("at::detail") public static native @ByVal TensorBase empty_meta(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef TensorOptions options); -@Namespace("at::detail") public static native @ByVal TensorBase empty_meta(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef TensorOptions options); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, ScalarType dtype); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, ScalarType dtype); +// Targeting ../UnionType.java -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, - @ByVal ScalarTypeOptional dtype_opt, - @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @ByVal ScalarTypeOptional dtype_opt, - @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, - @Const @ByRef TensorOptions options); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @Const @ByRef TensorOptions options); +// Targeting ../OptionalType.java -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_symint_meta( - @ByVal SymIntRef size, - @ByVal SymIntRef stride, - ScalarType dtype); -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_symint_meta( 
- @ByVal SymIntRef size, - @ByVal SymIntRef stride, - @ByVal ScalarTypeOptional dtype_opt, - @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt); +// Targeting ../Stride.java -@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_symint_meta( - @ByVal SymIntRef size, - @ByVal SymIntRef stride, - @Const @ByRef TensorOptions options); - // namespace detail - // namespace at +@Namespace("c10") public static native @ByVal StrideOptional merge_primitive( + @Const @ByRef StrideOptional a, + @Const @ByRef StrideOptional b); +// Targeting ../ShapeSymbol.java -// Parsed from ATen/LinalgBackend.h -// #pragma once -// #include +@Namespace("c10") public static native @ByVal ShapeSymbol merge_primitive( + @Const @ByRef ShapeSymbol a, + @Const @ByRef ShapeSymbol b); +// Targeting ../SymbolicShape.java -// #include -// #include -@Namespace("at") public enum LinalgBackend { Default((byte)(0)), Cusolver((byte)(1)), Magma((byte)(2)); +@Namespace("c10::detail") public static native @Cast("bool") boolean isComplete(@Const @ByRef Stride s); - public final byte value; - private LinalgBackend(byte v) { this.value = v; } - private LinalgBackend(LinalgBackend e) { this.value = e.value; } - public LinalgBackend intern() { for (LinalgBackend e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} +// Targeting ../LongVaryingShape.java -@Namespace("at") public static native @StdString BytePointer LinalgBackendToString(LinalgBackend backend); -@Namespace("at") public static native @StdString String LinalgBackendToString(@Cast("at::LinalgBackend") byte backend); -@Namespace("at") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( - @Cast("std::ostream*") @ByRef Pointer stream, - LinalgBackend backend); -@Namespace("at") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( - @Cast("std::ostream*") @ByRef Pointer stream, - @Cast("at::LinalgBackend") byte backend); +// Targeting ../StrideVaryingShape.java - // namespace at +// TODO: investigate making this SingletonOrSharedTypePtr +// Targeting ../TensorType.java -// Parsed from ATen/Formatting.h -// #include +// Targeting ../ListType.java -// Parsed from ATen/Generator.h +// Targeting ../DictType.java -// #pragma once -// #include +// Targeting ../FutureType.java -// Parsed from ATen/PadNd.h -// #pragma once -// #include -// #include +// Targeting ../AwaitType.java -@Namespace("at") public enum padding_mode { - reflect(0), - replicate(1), - circular(2), - constant(3); - public final int value; - private padding_mode(int v) { this.value = v; } - private padding_mode(padding_mode e) { this.value = e.value; } - public padding_mode intern() { for (padding_mode e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} +// Targeting ../RRefType.java -@Namespace("at") public static native @ByVal @Cast("c10::string_view*") Pointer padding_mode_string(padding_mode m); -@Namespace("at") public static native @ByVal @Cast("c10::string_view*") Pointer padding_mode_string(@Cast("at::padding_mode") int m); - // namespace at +// Any should never appear in a named type like a class, namedtuple or +// interface. If it does, then dynamic type information will be lost in the +// Pickler, leading to hard-to-track-down bugs that will only occur +// after saving or loading a model. 
This is because we rely on the
+// static types in named types to reconstruct type tags of loaded
+// values. Lifting this restriction requires solving the serialization
+// problem first.
+@Namespace("c10") public static native void checkNoAny(
+    @Const @ByRef Type base,
+    @Cast("const char*") BytePointer what,
+    @StdString BytePointer attrname,
+    @Const @ByRef Type.TypePtr attrtype);
+@Namespace("c10") public static native void checkNoAny(
+    @Const @ByRef Type base,
+    String what,
+    @StdString String attrname,
+    @Const @ByRef Type.TypePtr attrtype);
+// Targeting ../TupleType.java

-// Parsed from ATen/Parallel.h

-// #pragma once

-// #include
-// #include
-// #include
-// #include

-@Namespace("at") public static native @Cast("int64_t") long divup(@Cast("int64_t") long x, @Cast("int64_t") long y);

+// the common supertype of all Enums, only used in operator registration.
+// EnumType <: AnyEnumType for all Enums
+// Targeting ../AnyEnumType.java

-// Called during new thread initialization
-@Namespace("at") public static native void init_num_threads();

-// Sets the number of threads to be used in parallel region
-@Namespace("at") public static native void set_num_threads(int arg0);

+// Targeting ../NumberType.java

-// Returns the maximum number of threads that may be used in a parallel region
-@Namespace("at") public static native int get_num_threads();

-// Returns the current thread number (starting from 0)
-// in the current parallel region, or 0 in the sequential region
-@Namespace("at") public static native int get_thread_num();

+// Targeting ../FloatType.java

-// Checks whether the code runs in parallel region
-@Namespace("at") public static native @Cast("bool") boolean in_parallel_region();

-// Initialise num_threads lazily at first parallel call
-@Namespace("at::internal") public static native void lazy_init_num_threads();

+// Targeting ../ComplexType.java

-@Namespace("at::internal") public static native void set_thread_num(int arg0);

-// Targeting ../ThreadIdGuard.java

+// We need to introduce `SymIntType` to represent the `SymInt` type
+// used in function schemas e.g. `aten::narrow_copy(... SymInt length)`
+// `SymInt` will be used to enable tracing arithmetic operations on
+// dimension values. Please see [SymInt.h] for more information
+// Targeting ../SymIntType.java

- // namespace internal

-/*
-parallel_for

+// Targeting ../SymFloatType.java

-begin: index at which to start applying user function
-end: index at which to stop applying user function

+// Targeting ../IntType.java

-grain_size: number of elements per chunk. impacts the degree of parallelization
-f: user function applied in parallel to the chunks, signature:
-  void f(int64_t begin, int64_t end)

+// Targeting ../BoolType.java

-Warning: parallel_for does NOT copy thread local
-states from the current thread to the worker threads.
-This means for example that Tensor operations CANNOT be used in the
-body of your function, only data pointers.
-*/

+// Targeting ../StringType.java

-/*
-parallel_reduce

+// Targeting ../StorageType.java

-begin: index at which to start applying reduction
-end: index at which to stop applying reduction

+// Targeting ../FunctionType.java

-grain_size: number of elements per chunk. impacts number of elements in
-intermediate results tensor and degree of parallelization.
-ident: identity for binary combination function sf. sf(ident, x) needs to return
-x.

+// Targeting ../NoneType.java
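// The at:: thread-count functions bound above are normally used together;
// a minimal sketch, assuming they remain exposed as static methods on
// org.bytedeco.pytorch.global.torch after this refactor:

import static org.bytedeco.pytorch.global.torch.*;

public class IntraOpThreadsSketch {
    public static void main(String[] args) {
        init_num_threads();   // initialize the pool for the current thread
        set_num_threads(4);   // request up to 4 intra-op threads
        System.out.println(get_num_threads());    // max threads for parallel regions
        System.out.println(in_parallel_region()); // false outside a parallel region
    }
}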
-f: function for reduction over a chunk. f needs to be of signature scalar_t
-f(int64_t partial_begin, int64_t partial_end, scalar_t identity)

-sf: function to combine two partial results. sf needs to be of signature
-scalar_t sf(scalar_t x, scalar_t y)

+// Targeting ../GeneratorType.java

-For example, you might have a tensor of 10000 entries and want to sum together
-all the elements. Parallel_reduce with a grain_size of 2500 will then allocate
-an intermediate result tensor with 4 elements. Then it will execute the function
-"f" you provide and pass the beginning and end index of these chunks, so
-0-2499, 2500-4999, etc. and the combination identity. It will then write out
-the result from each of these chunks into the intermediate result tensor. After
-that it'll reduce the partial results from each chunk into a single number using
-the combination function sf and the identity ident. For a total summation this
-would be "+" and 0 respectively. This is similar to tbb's approach [1], where
-you need to provide a function to accumulate a subrange, a function to combine
-two partial results and an identity.

+// Targeting ../QuantizerType.java

-Warning: parallel_reduce does NOT copy thread local
-states from the current thread to the worker threads.
-This means for example that Tensor operations CANNOT be used in the
-body of your function, only data pointers.

+// Targeting ../QSchemeType.java

-[1] https://software.intel.com/en-us/node/506154
-*/

-// Returns a detailed string describing parallelization settings
-@Namespace("at") public static native @StdString BytePointer get_parallel_info();

+// Targeting ../DeviceObjType.java

-// Sets number of threads used for inter-op parallelism
-@Namespace("at") public static native void set_num_interop_threads(int arg0);

-// Returns the number of threads used for inter-op parallelism
-@Namespace("at") public static native int get_num_interop_threads();

+// Targeting ../StreamObjType.java

-// Launches inter-op parallel task
- // namespace internal

+// This type represents a type variable, used in FunctionSchema
+// Targeting ../CapsuleType.java

-// Launches intra-op parallel task
-@Namespace("at") public static native void intraop_launch(@ByVal Func func);

-// Returns number of intra-op threads used by default
-@Namespace("at") public static native int intraop_default_num_threads();

+// Targeting ../PyObjectType.java

- // namespace at

-// #if AT_PARALLEL_OPENMP
-// #include // IWYU pragma: keep
-// #elif AT_PARALLEL_NATIVE
-// #include // IWYU pragma: keep
-// #elif AT_PARALLEL_NATIVE_TBB
-// #include // IWYU pragma: keep
-// #endif

-// #include // IWYU pragma: keep

-// Parsed from ATen/Utils.h

-// #pragma once

-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include

-// #include
-// #include
-// #include
-// #include
-// #include

-// #define AT_DISALLOW_COPY_AND_ASSIGN(TypeName)
-// TypeName(const TypeName&) = delete;
-// void operator=(const TypeName&) = delete

+@Namespace("c10") public enum TypeVerbosity {
+  None(0),
+  Type(1),
+  TypeAndStride(2),
+  Full(3),
+  Symbolic(4),
+  Default(Full.value);
+
+  public final int value;
+  private TypeVerbosity(int v) { this.value = v; }
+  private TypeVerbosity(TypeVerbosity e) { this.value = e.value; }
+  public TypeVerbosity intern() { for (TypeVerbosity e : values()) if (e.value == value) return e; return this; }
+  @Override public String toString() { return intern().name(); }
+}

+@Namespace("c10") public static native TypeVerbosity
type_verbosity(); -@Namespace("at") public static native int _crash_if_asan(int arg0); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Type t); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef SymbolicShape s); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef ShapeSymbol s); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Const @ByRef Stride s); +// what is the type, ignoring extra size/shape information? +// e.g. Tensor(2x3) -> Dynamic, and Tuple(Tensor(2x3),...) -> Tuple(Dynamic,...) -// Converts a TensorList (i.e. ArrayRef to vector of TensorImpl*) -// NB: This is ONLY used by legacy TH bindings, and ONLY used by cat. -// Once cat is ported entirely to ATen this can be deleted! -@Namespace("at") public static native @ByVal TensorImplVector checked_dense_tensor_list_unwrap( - @ByVal TensorArrayRef tensors, - @Cast("const char*") BytePointer name, - int pos, - DeviceType device_type, - ScalarType scalar_type); -@Namespace("at") public static native @ByVal TensorImplVector checked_dense_tensor_list_unwrap( - @ByVal TensorArrayRef tensors, - String name, - int pos, - @Cast("c10::DeviceType") byte device_type, - ScalarType scalar_type); - // namespace detail +// `unshapedType` is used to remove Tensor subtypes. We treat all Tensor +// subtypes as simply "Tensor"; we also create a new version of any +// container types in which internal Tensors have undergone the same +// operation. This is used for type comparisons between two Tensor types +// (`unshapedType` means that we don't falsely return `false` for e.g. +// Tensors of different dimensions). It's also used in the alias +// analysis pass. +// Be careful with calls because this can be very slow. If calling this +// on a graph, use `EraseShapeInformation` in shape_analysis.h +@Namespace("c10") public static native @ByVal Type.TypePtr unshapedType(@Const @ByRef Type.TypePtr type); - // namespace at -// Parsed from ATen/TracerMode.h -// #pragma once +@Namespace("c10") public static native @ByVal ScalarTypeOptional tryScalarTypeFromJitType(@Const @ByRef Type type); -// #include -// #include -// #include +@Namespace("c10") public static native ScalarType scalarTypeFromJitType(@Const @ByRef Type type); -// NOTE [Tracing Mode Switches] -// -// Historically, tracing function was controlled by two switches: -// -// - `AutoDispatchBelowADInplaceOrView` guard -// -// Tracing function used to be script-generated inside `VariableType_*.cpp` -// kernels, sharing the same `Autograd` dispatch key with autograd function. -// Therefore, before tracing function was moved out of VariableType, -// `AutoDispatchBelowADInplaceOrView` guard can also disable tracing as a -// side effect of disabling `Autograd` dispatching. -// -// - `setTracingState()` API in `torch/csrc/jit/frontend/tracer.h` -// -// It stores tracing data in a `TracingState` object in TLS. If the -// `TracingState` object in TLS is `null`, then tracing is paused. -// -// The `TracingState` object is created in `tracer::trace()` - the main -// entrance of tracing function. 
It's temporarily set to `null` inside
-// generated VariableType (now TraceType) to bypass tracing for intermediate
-// ops (ops being called by other ops). After the intermediate op call
-// finishes it's set back to the original `TracingState` object.
-//
-// The `TracingState` object in TLS can also be read/written via its Python
-// binding in `python_tracer.cpp`, and `get/setTracingState()` C++ APIs,
-// which are also exposed as `TORCH_API`.
-//
-// Two new switches were introduced since tracing function was moved out of
-// VariableType:
-//
-// - `tracer::impl::set_dispatch_enabled()` API
-//
-// Unlike the special `Autograd` dispatch key which is included in dispatch
-// key set by default, `Tracer` dispatch key is off by default. The
-// dispatching switch can be toggled via this new API.
-//
-// - `tracer::impl::NoTracerDispatchMode` guard
-//
-// It's used to cover the old semantics of `AutoDispatchBelowADInplaceOrView`
-// after tracing was moved out of VariableType.
-//
-// Before tracing function was moved out of VariableType, tracing was enabled
-// when the following conditions are satisfied:
-//
-// 1) `TracingState` object in TLS != null;
-// - Either inside the execution scope of `tracer::trace()`, or
-// - Eagerly called `setTracingState()` with non-null object.
-// 2) Not inside `AutoDispatchBelowADInplaceOrView` scope;
-//
-// After:
-//
-// 1) `TracingState` object in TLS != null;
-// 2) Has called `tracer::impl::set_dispatch_enabled(true)`;
-// 3) Not inside `tracer::impl::NonDispatchGuard` scope;
-//
-// [TODOs]
-//
-// - `setTracingState()` v.s. `tracer::impl::set_dispatch_enabled()`
-//
-// Currently `set_dispatch_enabled()` is set/unset inside `setTracingState()`
-// to keep the semantics exactly the same as before - it's confusing to keep
-// both switches, though. We should consider simplifying/limiting the exposed
-// `setTracingState()` Python/C++ APIs (and other APIs calling it) so that
-// these two can be unified.
-//
-// - `AutoDispatchBelowADInplaceOrView` v.s.
-// `tracer::impl::NoTracerDispatchMode`
-//
-// We don't need to always set both guards together to keep semantics
-// unchanged. For the following use cases of `AutoDispatchBelowADInplaceOrView`
-// we don't need to set the new tracer guard:
-//
-// * Script-generated VariableType kernels. The guard is not necessary as
-// tracing is already disabled explicitly by `setTracingState(null)` in
-// generated TraceType kernels - we could keep it as is or use the new guard
-// instead.
-//
-// * Custom ops. Will be handled by fallback kernel for `Tracer`.
-//
-// * Functions that are not likely to be called in tracing context (no python
-// binding / not an operator), e.g.: all mobile forward() wrappers, test
-// binaries, etc.
-//
-// * Where new threads are spawned, e.g.: ATen/native/ConvolutionMM2d.cpp.
-// It's not necessary as tracing is off by default.
-//
-// For the rest of the cases we might need to have both:
+// Attempt to find the correct supertype of the two types `t1` and `t2`.
+// If no supertype is found, then nullopt will be returned if
+// `default_to_union` is false, and `Union[t1, t2]` will be returned
+// if it is true. If `t1 == t2`, or `t1` is a type refinement of `t2`,
+// then `t2` will be returned (and vice versa).
//
-// * Functions that might be reachable from eager mode python (especially
-// factory methods), e.g.:
-// `internal_new_from_data()` in `torch/csrc/utils/tensor_new.cpp`.
-// Without the new guard it will add `aten::empty` to the traced graph.
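// A pure-Java model of the unification rule documented above; it mirrors the
// nullopt / Union[t1, t2] behavior only (the native unifyTypes additionally
// handles subtype refinement and the type_hint parameter) and does not call
// into the bindings:

import java.util.Optional;

public class UnifyRuleSketch {
    static Optional<String> unify(String t1, String t2, boolean defaultToUnion) {
        if (t1.equals(t2)) return Optional.of(t2); // unify(t, t) == t
        return defaultToUnion
                ? Optional.of("Union[" + t1 + ", " + t2 + "]") // Union fallback
                : Optional.empty();                            // c10::nullopt
    }

    public static void main(String[] args) {
        System.out.println(unify("Int", "Int", false));   // Optional[Int]
        System.out.println(unify("Int", "Float", false)); // Optional.empty
        System.out.println(unify("Int", "Float", true));  // Optional[Union[Int, Float]]
    }
}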
+// Two different tensor types will return dynamic.
//
-// * Some manually maintained functions, e.g.:
-// `torch/csrc/autograd/VariableTypeManual.cpp`.
-// Set the new guard if it's not obvious whether `setTracingState(null)`
-// has been called before it reaches the `AutoDispatchBelowADInplaceOrView`
-// guard.
+// Currently we chose not to support returning a NumberType for
+// two types from the set of {FloatType, IntType, ComplexType}, because
+// there is a lack of operator support for NumberType.
//
-// We might need to tweak the usage of the new guard to optimize/fix things.
-// It should only affect the correctness of tracing function, because the
-// guard is essentially no-op when the master `setTracingState()` switch is
-// off.
-// TODO: move this from `at::` to `jit::torch::` after
-// `aten/src/ATen/cpp_custom_type_hack.h` is removed.
+// If `type_hint` is an `InterfaceType`, then we can use that as a
+// potential supertype for `ClassType`s in the list. Otherwise, we have
+// no way to find and use some common interface type
+@Namespace("c10") public static native @ByVal TypePtrOptional unifyTypes(
+    @Const @ByRef Type.TypePtr t1,
+    @Const @ByRef Type.TypePtr t2,
+    @Cast("bool") boolean default_to_union/*=false*/,
+    @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint);
+@Namespace("c10") public static native @ByVal TypePtrOptional unifyTypes(
+    @Const @ByRef Type.TypePtr t1,
+    @Const @ByRef Type.TypePtr t2);

-@Namespace("at::tracer::impl") public static native @Cast("bool") boolean is_dispatch_enabled();
+@Namespace("c10") public static native @ByVal TypePtrOptional unifyTypeList(
+    @ByVal TypeArrayRef elements,
+    @Cast("std::ostream*") @ByRef Pointer why_not,
+    @Cast("bool") boolean default_to_union/*=false*/,
+    @ByVal(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type_hint);
+@Namespace("c10") public static native @ByVal TypePtrOptional unifyTypeList(
+    @ByVal TypeArrayRef elements,
+    @Cast("std::ostream*") @ByRef Pointer why_not);
+  // namespace detail
+// Targeting ../MatchTypeReturn.java

-@Namespace("at::tracer::impl") public static native void set_dispatch_enabled(@Cast("bool") boolean enabled);
-// Targeting ../NoTracerDispatchMode.java
+// attempt to match the type variables in formal to actual, adding them to type_env.
+// If no match is possible this returns a MatchTypeReturn with r.success() == false
+// and a r.reason() that describes why it could not match.
+// note: It is possible to successfully match a formal, but for type variables
+// in the formal to still not be defined. In particular, None matches Optional[T]
+// but does not define the value of T.
+@Namespace("c10") public static native @ByVal MatchTypeReturn matchTypeVariables(@Const @ByRef Type.TypePtr formal, @Const @ByRef Type.TypePtr actual, @ByRef TypeEnv type_env);

- // namespace impl
- // namespace tracer
- // namespace at
+// replace type variables appearing in `type` with the values in
+// `type_env`.
Returns nullptr if a variable used in `type` +// does not appear in `type_env` +@Namespace("c10") public static native @ByVal Type.TypePtr tryEvalTypeVariables(@Const @ByRef Type.TypePtr type, @ByRef TypeEnv type_env); +@Namespace("c10") public static native @Cast("bool") boolean elementTypeCanBeInferredFromMembers(@Const @ByRef Type.TypePtr elem_type); +// Targeting ../InterfaceType.java -// Parsed from ATen/WrapDimUtils.h -// #pragma once +// Targeting ../LayoutEnumerationType.java -// #include -// #include -// #include -// #include -// #include -// if dim_post_expr is 0 and wrap_scalar is true, then dim must be in the -// range [-1, 0]. This is a special case for scalar tensors and manifests in -// e.g. torch.sum(scalar_tensor, 0) Otherwise, dim should be in the range -// [-dim_post_expr, dim_post_expr-1]. - -@Namespace("at") public static native @Cast("int64_t") long maybe_wrap_dim(@Cast("int64_t") long dim, TensorImpl tensor); +// Targeting ../ScalarTypeEnumerationType.java -@Namespace("at") public static native @Cast("int64_t") long maybe_wrap_dim(@Cast("int64_t") long dim, @ByVal TensorArrayRef tensors); -@Namespace("at") public static native @Cast("int64_t") long maybe_wrap_dim( - @Cast("int64_t") long dim, - @Cast("std::vector*") @StdVector LongVector tensor_sizes); +// Targeting ../MemoryFormattEnumerationType.java -// Given an array of dimensions `dims` of length `ndims`, this function "Wraps" -// each dim in-place for a tensor of rank `dim_post_expr`, allowing dims to be -// specified using negative indices. -// -// Additionally, if `wrap_scalar` is true then scalar tensors with rank 0, will -// allow dimensions in the range [-1, 0]. Otherwise, an IndexError is raised for -// dimensions not in the range [-dim_post_expr, dim_post_expr). -@Namespace("at") public static native void maybe_wrap_dims_n( - @Cast("int64_t*") LongPointer dims, - @Cast("int64_t") long ndims, - @Cast("int64_t") long dim_post_expr, - @Cast("bool") boolean wrap_scalars/*=true*/); -@Namespace("at") public static native void maybe_wrap_dims_n( - @Cast("int64_t*") LongPointer dims, - @Cast("int64_t") long ndims, - @Cast("int64_t") long dim_post_expr); -@Namespace("at") public static native void maybe_wrap_dims_n( - @Cast("int64_t*") LongBuffer dims, - @Cast("int64_t") long ndims, - @Cast("int64_t") long dim_post_expr, - @Cast("bool") boolean wrap_scalars/*=true*/); -@Namespace("at") public static native void maybe_wrap_dims_n( - @Cast("int64_t*") LongBuffer dims, - @Cast("int64_t") long ndims, - @Cast("int64_t") long dim_post_expr); -@Namespace("at") public static native void maybe_wrap_dims_n( - @Cast("int64_t*") long[] dims, - @Cast("int64_t") long ndims, - @Cast("int64_t") long dim_post_expr, - @Cast("bool") boolean wrap_scalars/*=true*/); -@Namespace("at") public static native void maybe_wrap_dims_n( - @Cast("int64_t*") long[] dims, - @Cast("int64_t") long ndims, - @Cast("int64_t") long dim_post_expr); -// Given a contiguous container of dimensions `dims`, this function "Wraps" -// each dim in-place for a tensor of rank `dim_post_expr`, allowing dims to be -// specified using negative indices. -// -// Additionally, if `wrap_scalar` is true then scalar tensors with rank 0, will -// allow dimensions in the range [-1, 0]. Otherwise, an IndexError is raised for -// dimensions not in the range [-dim_post_expr, dim_post_expr). 
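// The wrapping rule above is small index arithmetic; a pure-Java model of the
// documented behavior (a sketch, not a call into the at:: helpers bound here):

public class WrapDimSketch {
    // Maps a possibly negative dim into [0, dimPostExpr); a rank-0 tensor is
    // treated as rank 1 when wrapScalar is set, so only -1 and 0 are legal.
    static long wrapDim(long dim, long dimPostExpr, boolean wrapScalar) {
        if (dimPostExpr <= 0) {
            if (!wrapScalar) throw new IndexOutOfBoundsException("dim on a rank-0 tensor");
            dimPostExpr = 1; // e.g. torch.sum(scalar_tensor, 0)
        }
        if (dim < -dimPostExpr || dim >= dimPostExpr) {
            throw new IndexOutOfBoundsException(
                    "dim " + dim + " not in [" + (-dimPostExpr) + ", " + (dimPostExpr - 1) + "]");
        }
        return dim < 0 ? dim + dimPostExpr : dim;
    }

    public static void main(String[] args) {
        System.out.println(wrapDim(-1, 4, true)); // 3, the last dim of a rank-4 tensor
        System.out.println(wrapDim(1, 4, true));  // 1, non-negative dims pass through
        System.out.println(wrapDim(0, 0, true));  // 0, scalar special case
    }
}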
-// previously, size [0] tensors were the only possible empty tensors; thus, it -// wasn't possible to cat empty tensors unless all the other tensors were -// 1-dimensional, so we allowed these tensors to be "skipped" (both for wrap -// dimension behavior and dimension size checking). We maintain this behavior -// for backwards compatibility, but only for this specific size (i.e. other -// empty sizes are not skipped). +// WARNING: These enumeration types below DO NOT actually get parsed out +// from the logical schema strings, instead they are mapped as ints. To +// observe these types, use real_type() instead of type() on Argument +// Targeting ../ScalarTypeType.java -@Namespace("at") public static native @Cast("int64_t") long legacy_cat_wrap_dim( - @Cast("int64_t") long dim, - @Cast("std::vector*") @StdVector LongVector tensor_sizes); -@Namespace("at") public static native @Cast("int64_t") long legacy_cat_wrap_dim_symint( - @Cast("int64_t") long dim, - @StdVector SymIntVector tensor_sizes); +// Targeting ../MemoryFormatType.java -// wrap negative dims in a vector -@Namespace("at") public static native void wrap_all_dims( - @Cast("std::vector*") @ByRef LongVector dims_to_wrap, - @Cast("int64_t") long tensor_total_dims); - // namespace at +// Targeting ../LayoutType.java -// Parsed from ATen/Tensor.h + // namespace detail -// #pragma once +// the common supertype of all lists, +// List[T] <: AnyList for all T +// Targeting ../AnyListType.java -// #include -// Parsed from ATen/TensorGeometry.h +// the common supertype of all tuples, +// Tuple[T...] <: AnyTuple for all T +// Targeting ../AnyTupleType.java -// #pragma once -// #include -// #include -// Return if the tensor geometry represented by `sizes` and `strides` is -// contiguous Although we cache is_contiguous in tensor now, this is till useful -// because it allows checking if a particular geometry is contiguous without -// explicitly constructing a tensor, e.g., when you want to choose a kernel -// strategy based on whether a subgeometry is contiguous. -@Namespace("at") public static native @Cast("bool") boolean geometry_is_contiguous(@ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides); -@Namespace("at") public static native @Cast("bool") boolean geometry_is_contiguous(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); -// Targeting ../TensorGeometry.java +// the common supertype of all classes, +// ClassType <: AnyClassType for all classes +// Targeting ../AnyClassType.java - // namespace at -// Parsed from ATen/TensorNames.h -// #pragma once -// #include -// Targeting ../TensorName.java +// Targeting ../InferredType.java -// Targeting ../TensorNames.java +@Namespace("c10") public static native @Cast("bool") boolean containsAnyType(@Const @ByRef Type.TypePtr type); - // namespace namedinference - // namespace at + // namespace c10 -// Parsed from ATen/TensorUtils.h +// Parsed from ATen/core/rref_interface.h // #pragma once -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// Targeting ../RRefInterface.java -// #include -// These functions are NOT in Utils.h, because this file has a dep on Tensor.h -// #define TORCH_CHECK_TENSOR_ALL(cond, ...) 
-// TORCH_CHECK((cond)._is_all_true().item(), __VA_ARGS__); -// Targeting ../TensorArg.java -// Targeting ../TensorGeometryArg.java +// Parsed from c10/core/impl/DeviceGuardImplInterface.h +// #pragma once -// A string describing which function did checks on its input -// arguments. -// TODO: Consider generalizing this into a call stack. +// #include +// #include +// #include +// #include -// The undefined convention: singular operators assume their arguments -// are defined, but functions which take multiple tensors will -// implicitly filter out undefined tensors (to make it easier to perform -// tests which should apply if the tensor is defined, and should not -// otherwise.) -// -// NB: This means that the n-ary operators take lists of TensorArg, -// not TensorGeometryArg, because the Tensor to TensorGeometry -// conversion will blow up if you have undefined tensors. +// Just for C10_ANONYMOUS_VARIABLE +// #include -@Namespace("at") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @ByVal TensorGeometryArg t); -@Namespace("at") public static native void checkDim( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef Tensor tensor, - @Cast("const char*") BytePointer name, - int pos, - @Cast("int64_t") long dim); -@Namespace("at") public static native void checkDim( - @Cast("at::CheckedFrom") String c, - @Const @ByRef Tensor tensor, - String name, - int pos, - @Cast("int64_t") long dim); -@Namespace("at") public static native void checkDim(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorGeometryArg t, @Cast("int64_t") long dim); -@Namespace("at") public static native void checkDim(@Cast("at::CheckedFrom") String c, @Const @ByRef TensorGeometryArg t, @Cast("int64_t") long dim); -// NB: this is an inclusive-exclusive range -@Namespace("at") public static native void checkDimRange( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorGeometryArg t, - @Cast("int64_t") long dim_start, - @Cast("int64_t") long dim_end); -@Namespace("at") public static native void checkDimRange( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorGeometryArg t, - @Cast("int64_t") long dim_start, - @Cast("int64_t") long dim_end); -@Namespace("at") public static native void checkSameDim( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorGeometryArg t1, - @Const @ByRef TensorGeometryArg t2); -@Namespace("at") public static native void checkSameDim( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorGeometryArg t1, - @Const @ByRef TensorGeometryArg t2); -@Namespace("at") public static native void checkContiguous(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorGeometryArg t); -@Namespace("at") public static native void checkContiguous(@Cast("at::CheckedFrom") String c, @Const @ByRef TensorGeometryArg t); -@Namespace("at") public static native void checkAllContiguous(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef ts); -@Namespace("at") public static native void checkAllContiguous(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef ts); -@Namespace("at") public static native void checkSize( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorGeometryArg t, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes); -@Namespace("at") public static native void checkSize( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorGeometryArg t, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); -@Namespace("at") public static native void checkSize_symint( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorGeometryArg t, - @ByVal SymIntRef sizes); -@Namespace("at") public static native void checkSize_symint( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorGeometryArg t, - @ByVal SymIntRef sizes); -@Namespace("at") public static native void checkSize( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorGeometryArg t, - @Cast("int64_t") long dim, - @Cast("int64_t") long size); -@Namespace("at") public static native void checkSize( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorGeometryArg t, - @Cast("int64_t") long dim, - @Cast("int64_t") long size); -@Namespace("at") public static native void checkSize_symint( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorGeometryArg t, - @Cast("int64_t") long dim, - @ByVal SymInt size); -@Namespace("at") public static native void checkSize_symint( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorGeometryArg t, - @Cast("int64_t") long dim, - @ByVal SymInt size); -@Namespace("at") public static native void checkNumel( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorGeometryArg t, - @Cast("int64_t") long numel); -@Namespace("at") public static native void checkNumel( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorGeometryArg t, - @Cast("int64_t") long numel); +// #include -@Namespace("at") public static native void checkAllSameNumel(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef tensors); -@Namespace("at") public static native void checkAllSameNumel(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef tensors); -@Namespace("at") public static native void checkScalarType(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorArg t, ScalarType s); -@Namespace("at") public static native void checkScalarType(@Cast("at::CheckedFrom") String c, @Const @ByRef TensorArg t, ScalarType s); -@Namespace("at") public static native void checkScalarTypes( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorArg t, - @ByVal ScalarTypeArrayRef l); -@Namespace("at") public static native void checkScalarTypes( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorArg t, - @ByVal ScalarTypeArrayRef l); -@Namespace("at") public static native void checkSameGPU( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorArg t1, - @Const @ByRef TensorArg t2); -@Namespace("at") public static native void checkSameGPU( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorArg t1, - @Const @ByRef TensorArg t2); -@Namespace("at") public static native void checkAllSameGPU(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef tensors); -@Namespace("at") public static native void checkAllSameGPU(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef tensors); -@Namespace("at") public static native void checkSameType( - @Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorArg t1, - @Const @ByRef TensorArg t2); -@Namespace("at") public static native void checkSameType( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorArg t1, - @Const @ByRef TensorArg t2); -@Namespace("at") public static native void checkAllSameType(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef tensors); -@Namespace("at") public static native void checkAllSameType(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef tensors); -@Namespace("at") public static native void checkSameSize( - 
@Cast("at::CheckedFrom") BytePointer c, - @Const @ByRef TensorArg t1, - @Const @ByRef TensorArg t2); -@Namespace("at") public static native void checkSameSize( - @Cast("at::CheckedFrom") String c, - @Const @ByRef TensorArg t1, - @Const @ByRef TensorArg t2); -@Namespace("at") public static native void checkDefined(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorArg t); -@Namespace("at") public static native void checkDefined(@Cast("at::CheckedFrom") String c, @Const @ByRef TensorArg t); -@Namespace("at") public static native void checkAllDefined(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef t); -@Namespace("at") public static native void checkAllDefined(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef t); +// Forward declaration -// FixMe: does TensorArg slow things down? -@Namespace("at") public static native void checkBackend( - @Cast("at::CheckedFrom") BytePointer c, - @ByVal TensorArrayRef t, - @ByVal Backend backend); -@Namespace("at") public static native void checkBackend( - @Cast("at::CheckedFrom") String c, - @ByVal TensorArrayRef t, - @ByVal Backend backend); +/** + * Flags defining the behavior of events. + * + * PYTORCH_DEFAULT and BACKEND_DEFAULT are valid for all backends. The + * BACKEND_DEFAULT is what a particular backend would select if no + * flags were given. PYTORCH_DEFAULT is the PyTorch's framework default + * choice for events on that backend, which may not be the same. For example, + * when PyTorch creates a CUDA event it sets the flag + * CUDA_EVENT_DISABLING_TIMING by default to improve performance. + * + * The mapping of PYTORCH_DEFAULT and BACKEND_DEFAULT is done by each + * backend implementation. Backend-specific flags, like CUDA_EVENT_DEFAULT, + * should map one-to-one with actual event flags for those backends. 
+ */ +@Namespace("c10") public enum EventFlag { + PYTORCH_DEFAULT(0), + BACKEND_DEFAULT(1), + // CUDA flags + CUDA_EVENT_DEFAULT(2), + CUDA_EVENT_DISABLE_TIMING(3), // PyTorch-default for CUDA + // HIP flags + HIP_EVENT_DEFAULT(4), + HIP_EVENT_DISABLE_TIMING(5), // PyTorch-default for HIP + // FOR TESTING ONLY + INVALID(6); -@Namespace("at") public static native void checkDeviceType( - @Cast("at::CheckedFrom") BytePointer c, - @ByVal TensorArrayRef tensors, - @ByVal DeviceType device_type); -@Namespace("at") public static native void checkDeviceType( - @Cast("at::CheckedFrom") String c, - @ByVal TensorArrayRef tensors, - @ByVal DeviceType device_type); + public final int value; + private EventFlag(int v) { this.value = v; } + private EventFlag(EventFlag e) { this.value = e.value; } + public EventFlag intern() { for (EventFlag e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../DeviceGuardImplInterface.java -@Namespace("at") public static native void checkLayout(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef Tensor t, Layout layout); -@Namespace("at") public static native void checkLayout(@Cast("at::CheckedFrom") String c, @Const @ByRef Tensor t, @Cast("c10::Layout") byte layout); -@Namespace("at") public static native void checkLayout( - @Cast("at::CheckedFrom") BytePointer c, - @ByVal TensorArrayRef tensors, - @ByVal Layout layout); -@Namespace("at") public static native void checkLayout( - @Cast("at::CheckedFrom") String c, - @ByVal TensorArrayRef tensors, - @ByVal Layout layout); -// Methods for getting data_ptr if tensor is defined -@Namespace("at") public static native Pointer maybe_data_ptr(@Const @ByRef Tensor tensor); -@Namespace("at") public static native Pointer maybe_data_ptr(@Const @ByRef TensorArg tensor); +// A no-op device guard impl that doesn't do anything interesting. Useful +// for devices that don't actually have a concept of device index. Prominent +// examples are CPU and Meta. -@Namespace("at") public static native void check_dim_size( - @Const @ByRef Tensor tensor, - @Cast("int64_t") long dim, - @Cast("int64_t") long dim_size, - @Cast("int64_t") long size); -@Namespace("at::detail") public static native @ByVal @Cast("std::vector*") LongVector defaultStrides(@ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes); -@Namespace("at::detail") public static native @ByVal @Cast("std::vector*") LongVector defaultStrides(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); +// The registry is NON-owning. Each stored pointer is std::atomic so +// that under all interleavings of registry calls the structure is +// race-free. This doesn't cost us anything on reads in X86. (An +// unsynchronized implementation probably is OK too, but I didn't want +// to prove that we never read from device_guard_impl_registry at the +// same time some registration is occurring. Shiver.) +// +// I'd like this registry to be valid even at program destruction time +// (in case someone uses a DeviceGuard in a destructor to do some cleanup +// in the CUDA API.) Since there are no direct accesses of the underlying +// owning objects which I can use to enforce initialization order (unlike +// in a Meyer singleton), it implies that you must *leak* objects when +// putting them in the registry. This is done by deleting the destructor +// on DeviceGuardImplInterface. 
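For readers of the generated bindings: the EventFlag enum added above follows the usual JavaCPP pattern, where each constant carries the underlying C++ enumerator in its value field and intern() maps a value back to its canonical constant. A minimal usage sketch, assuming only what the enum itself declares (the import location is an assumption; JavaCPP typically emits such enums into the presets' global torch class, so adjust to your build):

    import org.bytedeco.pytorch.global.torch.EventFlag; // assumed location

    public class EventFlagDemo {
        public static void main(String[] args) {
            EventFlag f = EventFlag.CUDA_EVENT_DISABLE_TIMING;
            System.out.println(f.value);    // 3, the underlying C++ enumerator value
            System.out.println(f.intern()); // canonical constant for that value
            System.out.println(f);          // toString() delegates to intern().name()
        }
    }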
-@Namespace("at::detail") public static native @ByVal LongVectorOptional computeStride( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef oldshape, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef oldstride, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef newshape); -@Namespace("at::detail") public static native @ByVal LongVectorOptional computeStride( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldshape, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldstride, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... newshape); +// Targeting ../DeviceGuardImplRegistrar.java -@Namespace("at::detail") public static native @ByVal SymDimVectorOptional computeStride( - @ByVal SymIntRef oldshape, - @ByVal SymIntRef oldstride, - @ByVal SymIntRef newshape); -@Namespace("at::detail") public static native @ByVal DimVectorOptional computeStride( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef oldshape, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef oldstride, - @Const @ByRef DimVector newshape); -@Namespace("at::detail") public static native @ByVal DimVectorOptional computeStride( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldshape, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldstride, - @Const @ByRef DimVector newshape); - // namespace detail - // namespace at +// #define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl) +// static ::c10::impl::DeviceGuardImplRegistrar C10_ANONYMOUS_VARIABLE( +// g_##DeviceType)(::c10::DeviceType::DevType, new DeviceGuardImpl()); +@Namespace("c10::impl") public static native @Const DeviceGuardImplInterface getDeviceGuardImpl(DeviceType type); +@Namespace("c10::impl") public static native @Const DeviceGuardImplInterface getDeviceGuardImpl(@Cast("c10::DeviceType") byte type); + +@Namespace("c10::impl") public static native @Cast("bool") boolean hasDeviceGuardImpl(DeviceType type); +@Namespace("c10::impl") public static native @Cast("bool") boolean hasDeviceGuardImpl(@Cast("c10::DeviceType") byte type); + + // namespace impl + // namespace c10 -// Parsed from ATen/Context.h + +// Parsed from c10/core/impl/VirtualGuardImpl.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include // #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +/** + * An implementation of DeviceGuardImplInterface which delegates + * to virtual dispatch on the DeviceGuardImpl registry. + */ -@Namespace("at") public enum Float32MatmulPrecision { HIGHEST(0), HIGH(1), MEDIUM(2); + // namespace impl + // namespace c10 - public final int value; - private Float32MatmulPrecision(int v) { this.value = v; } - private Float32MatmulPrecision(Float32MatmulPrecision e) { this.value = e.value; } - public Float32MatmulPrecision intern() { for (Float32MatmulPrecision e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -// Targeting ../Context.java +// Parsed from c10/core/impl/InlineDeviceGuard.h +// #pragma once -@Namespace("at") public static native @ByRef Context globalContext(); +// This file provides implementations of InlineDeviceGuard and +// InlineOptionalDeviceGuard. 
-@Namespace("at") public static native void init(); +// #include +// #include +// #include +// #include +// #include -@Namespace("at") public static native Allocator getCPUAllocator(); +/** + * A DeviceGuard is an RAII class that sets a device to some value + * on construction, and resets the device to its original value on + * destruction. + * + * InlineDeviceGuard is a helper class for implementing DeviceGuards. + * It is templated over a DeviceGuardImpl (anything that implements + * DeviceGuardImplInterface). There are two primary ways to instantiate + * InlineDeviceGuard: + * + * - With a concrete implementation of DeviceGuardImpl, e.g., CUDAGuardImpl. + * This is the best way to use InlineDeviceGuard, as all calls are + * devirtualized, giving you code as efficient as straight line + * calls to cudaGetDevice/cudaSetDevice. + * + * - With VirtualGuardImpl, which does a virtual dispatch to a DeviceGuardImpl + * retrieved from a DeviceType registry. We have explicitly instantiated + * InlineDeviceGuard this way as c10::DeviceGuard. + * + * If you are in a hurry, you can use InlineDeviceGuard directly: + * + * using CUDAGuard = impl::InlineDeviceGuard; + * + * However, you can provide a better user experience if you explicitly write a + * wrapper class that itself contains the template instantiation: + * + * class CUDAGuard { + * public: + * // ... the API ... + * private: + * impl::InlineDeviceGuard guard_; + * } + * + * The wrapper class provides a good place to write documentation, and helps + * avoid weird template instantiation errors when a user incorrectly uses the + * class. + * + * If you need to test this class, consider instantiating it with FakeGuardImpl. + */ -@Namespace("at") public static native @ByRef DeprecatedTypeProperties getDeprecatedTypeProperties( - Backend p, - ScalarType s); -@Namespace("at") public static native @ByRef DeprecatedTypeProperties getDeprecatedTypeProperties( - @Cast("c10::Backend") int p, - ScalarType s); +/** + * A OptionalDeviceGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * + * InlineOptionalDeviceGuard is a helper class for implementing + * OptionalDeviceGuards. See guidance in InlineDeviceGuard on how to + * use this. See OptionalDeviceGuard for user-oriented usage notes. + */ -@Namespace("at") public static native @ByRef DeprecatedTypeProperties CPU(ScalarType s); + // namespace impl + // namespace c10 -@Namespace("at") public static native @ByRef DeprecatedTypeProperties CUDA(ScalarType s); -@Namespace("at") public static native @ByRef DeprecatedTypeProperties HIP(ScalarType s); +// Parsed from c10/core/DeviceGuard.h -@Namespace("at") public static native @ByRef DeprecatedTypeProperties MPS(ScalarType s); +// #pragma once -@Namespace("at") public static native @Cast("bool") boolean hasCUDA(); +// #include -@Namespace("at") public static native @Cast("bool") boolean hasHIP(); +/** RAII guard that sets a certain default device in its constructor, and + * changes it back to the device that was originally active upon destruction. + * + * The device is always reset to the one that was active at the time of + * construction of the guard. Even if you {@code set_device} after construction, the + * destructor will still reset the device to the one that was active at + * construction time. + * + * This device guard does NOT have an uninitialized state; it is guaranteed + * to reset a device on exit. 
If you are in a situation where you *might* + * want to setup a guard (i.e., are looking for the moral equivalent + * of optional), see OptionalDeviceGuard. */ +// Targeting ../OptionalDeviceGuard.java -@Namespace("at") public static native @Cast("bool") boolean hasIPU(); -@Namespace("at") public static native @Cast("bool") boolean hasXLA(); -@Namespace("at") public static native @Cast("bool") boolean hasMPS(); +// Note [Whither the DeviceGuard boilerplate] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Design note: in principle, we could avoid these wrappers using: +// +// using DeviceGuard = impl::InlineDeviceGuard; +// using OptionalDeviceGuard = +// impl::InlineOptionalDeviceGuard; +// +// But the error messages are worse, and our users can't just look at the +// header file to find out what's going on. Furthermore, for specializations +// like CUDAStreamGuard, it can be profitable to replace some interfaces with +// refined types (e.g., return CUDAStream instead of Stream). So, we eat +// the boilerplate and write out the API explicitly. -@Namespace("at") public static native @Cast("bool") boolean hasORT(); + // namespace c10 -// Despite its name, this function returns the number of *CUDA* GPUs. -@Namespace("at") public static native @Cast("size_t") long getNumGPUs(); -@Namespace("at") public static native @Cast("bool") boolean hasOpenMP(); +// Parsed from c10/core/impl/InlineEvent.h -@Namespace("at") public static native @Cast("bool") boolean hasMKL(); +// #pragma once -@Namespace("at") public static native @Cast("bool") boolean hasLAPACK(); +// #include +// #include +// #include +// #include -@Namespace("at") public static native @Cast("bool") boolean hasMAGMA(); + // namespace impl + // namespace c10 -@Namespace("at") public static native @Cast("bool") boolean hasMKLDNN(); -@Namespace("at") public static native void manual_seed(@Cast("uint64_t") long seed); -// Targeting ../NoTF32Guard.java +// Parsed from c10/core/Event.h +// #pragma once +// #include +// #include -// #ifdef USE_ROCM -// #endif +/** + * A backend-generic movable, not copyable, not thread-safe event. + * + * The design of this event follows that of CUDA and HIP events. These events + * are recorded and waited on by streams and can be rerecorded to, + * each rerecording essentially creating a new version of the event. + * For example, if (in CPU time), stream X is asked to record E, + * stream Y waits on E, and stream X is asked to record E again, then Y will + * wait for X to finish the first call to record and not the second, because + * it's waiting on the first version of event E, not the second. + * Querying an event only returns the status of its most recent version. + * + * Backend-generic events are implemented by this class and + * impl::InlineEvent. In addition to these events there are also + * some backend-specific events, like ATen's CUDAEvent. Each of these + * classes has its own use. + * + * impl::InlineEvent<...> or a backend-specific event should be + * preferred when the backend is known at compile time and known to + * be compiled. Backend-specific events may have additional functionality. + * + * This Event should be used if a particular backend may not be available, + * or the backend required is not known at compile time. + * + * These generic events are built on top of DeviceGuardImpls, analogous + * to DeviceGuard and InlineDeviceGuard. 
The name "DeviceGuardImpls," + * is no longer entirely accurate, as these classes implement the + * backend-specific logic for a generic backend interface. + * + * See DeviceGuardImplInterface.h for a list of all supported flags. + */ - // namespace at + // namespace c10 -// Parsed from ATen/ExpandUtils.h +// Parsed from c10/core/impl/InlineStreamGuard.h // #pragma once -// #ifndef AT_PER_OPERATOR_HEADERS -// #include -// #else -// #include -// #include -// #endif - -// #include -// #include -// #include -// #include +// #include +// #include // #include -// #include -// #include -// #include -// #include +/** + * A StreamGuard is an RAII class that changes the current device + * to the device corresponding to some stream, and changes the + * default stream on that device to be this stream. + * + * InlineStreamGuard is a helper class for implementing StreamGuards. + * See InlineDeviceGuard for guidance on how to use this class. + */ -@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_size(@ByVal @Cast("c10::ArrayRef*") LongArrayRef a, @ByVal @Cast("c10::ArrayRef*") LongArrayRef b); -@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_size(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] a, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... b); -@Namespace("at") public static native @ByVal DimVector infer_size_dimvector(@ByVal @Cast("c10::ArrayRef*") LongArrayRef a, @ByVal @Cast("c10::ArrayRef*") LongArrayRef b); -@Namespace("at") public static native @ByVal DimVector infer_size_dimvector(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] a, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... b); -@Namespace("at") public static native @ByVal SymDimVector infer_size_symdimvector(@ByVal SymIntRef a, @ByVal SymIntRef b); -// Targeting ../DimVectorInferExpandGeometryResult.java +/** + * An OptionalStreamGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * See InlineOptionalDeviceGuard for more guidance on how to use this class. + */ + // namespace impl + // namespace c10 -@Namespace("at") public static native @ByVal @Cast("std::tuple,std::vector >*") LongVector inferExpandGeometry( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef tensor_sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef tensor_strides, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes); -@Namespace("at") public static native @ByVal @Cast("std::tuple,std::vector >*") LongVector inferExpandGeometry( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_strides, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); +// Parsed from c10/core/StreamGuard.h -@Namespace("at") public static native @ByVal DimVectorInferExpandGeometryResult inferExpandGeometry_dimvector( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef tensor_sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef tensor_strides, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes); -@Namespace("at") public static native @ByVal DimVectorInferExpandGeometryResult inferExpandGeometry_dimvector( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_strides, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); +// #pragma once -@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_dense_strides( - @ByVal @Cast("c10::ArrayRef*") LongArrayRef tensor_sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef tensor_strides); -@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_dense_strides( - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... tensor_strides); +// #include -// True if input shapes are expandable -// NOTE: infer_size did a similar check, please keep them sync if change is -// needed -@Namespace("at") public static native @Cast("bool") boolean are_expandable(@ByVal @Cast("c10::ArrayRef*") LongArrayRef shape1, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape2); -@Namespace("at") public static native @Cast("bool") boolean are_expandable(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape1, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape2); +/** + * A StreamGuard is an RAII class that changes the current device + * to the device corresponding to some stream, and changes the + * default stream on that device to be this stream. + * + * Use of StreamGuard is HIGHLY discouraged in operator definitions. In + * a single operator, you probably don't know enough about the global + * state of the world to profitably decide how to set streams. Let + * the caller handle this appropriately, and just use the current stream + * in your operator code. + * + * This StreamGuard does NOT have an uninitialized state; it is guaranteed + * to reset the stream and device on exit. If you are in a situation + * where you *might* want to setup a stream guard, see OptionalStreamGuard. + */ -// avoid copy-construction of Tensor by using a reference_wrapper. +/** + * An OptionalStreamGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * See OptionalDeviceGuard for more guidance on how to use this class. + */ -// NOTE [ ExpandUtils Borrowing ] -// -// Functions in ExpandUtils return `c10::MaybeOwned` because -// expansion may not actually be needed, in which case we can improve -// efficiency by returning -// `c10::MaybeOwned::borrowed(to_expand)`. However, this means -// that you need to be careful: the returned `c10::MaybeOwned` -// must not outlive the original `Tensor` object that `to_expand` -// referred to! The deleted rvalue reference overloads of these -// functions help with this by preventing trivial use of a temporary -// resulting from a function call, but it is still possible to make a -// mistake. 
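The are_expandable binding removed above tests whether two shapes are broadcast-compatible. The rule it checks is small enough to spell out; this plain-Java sketch is illustrative only and is not the generated code:

    public final class BroadcastSketch {
        // Aligned from the trailing dimension, each pair of sizes must be
        // equal or one of them must be 1; missing leading dims act as 1.
        static boolean areExpandable(long[] shape1, long[] shape2) {
            int i = shape1.length - 1, j = shape2.length - 1;
            while (i >= 0 && j >= 0) {
                if (shape1[i] != shape2[j] && shape1[i] != 1 && shape2[j] != 1)
                    return false;
                i--; j--;
            }
            return true;
        }

        public static void main(String[] args) {
            System.out.println(areExpandable(new long[]{8, 1, 6}, new long[]{7, 1})); // true
            System.out.println(areExpandable(new long[]{8, 4, 3}, new long[]{2, 3})); // false
        }
    }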
+/** + * A MultiStreamGuard is an RAII class that sets the current streams of a set of + * devices all at once, and resets them to their original values on destruction. + */ -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_inplace( - @Const @ByRef Tensor tensor, - @Const @ByRef Tensor to_expand); + // namespace c10 +// Parsed from c10/util/FunctionRef.h -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_inplace( - @Const @ByRef Tensor tensor, - @Const @ByRef Tensor to_expand, - @Cast("const char*") BytePointer api_name); -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_inplace( - @Const @ByRef Tensor tensor, - @Const @ByRef Tensor to_expand, - String api_name); +//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains some templates that are useful if you are working with the +// STL at all. +// +// No library is required when using these functions. +// +//===----------------------------------------------------------------------===// +// c10: modified from llvm::function_ref +// c10: added more SFINAE to enable use in overloaded functions +// #pragma once -@Namespace("at") public static native @ByVal TensorMaybeOwnedTensorMaybeOwnedTuple expand_inplace( - @Const @ByRef Tensor tensor, - @Const @ByRef Tensor to_expand1, - @Const @ByRef Tensor to_expand2); +// #include +// #include +// #include +/** An efficient, type-erasing, non-owning reference to a callable. This is + * intended for use as the type of a function parameter that is not used + * after the function in question returns. + * + * This class does not own the callable, so it is not in general safe to store + * a function_ref. */ + // namespace c10 +// Parsed from c10/util/intrusive_ptr.h -@Namespace("at") public static native @ByVal TensorMaybeOwnedTensorMaybeOwnedTuple expand_inplace( - @Const @ByRef Tensor tensor, - @Const @ByRef Tensor to_expand1, - @Const @ByRef Tensor to_expand2, - @Cast("const char*") BytePointer api_name); -@Namespace("at") public static native @ByVal TensorMaybeOwnedTensorMaybeOwnedTuple expand_inplace( - @Const @ByRef Tensor tensor, - @Const @ByRef Tensor to_expand1, - @Const @ByRef Tensor to_expand2, - String api_name); +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../class_.java +@Namespace("c10::raw::weak_intrusive_ptr") public static native void incref(@Cast("c10::intrusive_ptr_target*") Pointer self); -// See NOTE [ ExpandUtils Borrowing ] above for `MaybeOwned` explanation. -@Namespace("at") public static native @ByVal TensorMaybeOwnedTensorMaybeOwnedTuple expand_outplace(@Const @ByRef Tensor to_expand1, @Const @ByRef Tensor to_expand2); +// Targeting ../DontIncreaseRefcount.java + // namespace raw +/** + * intrusive_ptr is an alternative to shared_ptr that has better + * performance because it does the refcounting intrusively + * (i.e. in a member of the object itself). + * Your class T needs to inherit from intrusive_ptr_target to allow it to be + * used in an intrusive_ptr. 
Your class's constructor should not allow + *{@code this} to escape to other threads or create an intrusive_ptr from {@code this}. + */ +// Note [Stack allocated intrusive_ptr_target safety] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// A well known problem with std::enable_shared_from_this is that it +// allows you to create a std::shared_ptr from a stack allocated object, +// which is totally bogus because the object will die once you return +// from the stack. In intrusive_ptr, we can detect that this has occurred, +// because we set the refcount/weakcount of objects which inherit from +// intrusive_ptr_target to zero, *unless* we can prove that the object +// was dynamically allocated (e.g., via make_intrusive). +// +// Thus, whenever you transmute a T* into a intrusive_ptr, we check +// and make sure that the refcount isn't zero (or, a more subtle +// test for weak_intrusive_ptr, for which the refcount may validly +// be zero, but the weak refcount better not be zero), because that +// tells us if the object was allocated by us. If it wasn't, no +// intrusive_ptr for you! -@Namespace("at") public static native @ByVal TensorMaybeOwnedTensorMaybeOwnedTuple expand_outplace( - @Const @ByRef Tensor to_expand1, - @Const @ByRef Tensor to_expand2, - @Cast("const char*") BytePointer api_name); -@Namespace("at") public static native @ByVal TensorMaybeOwnedTensorMaybeOwnedTuple expand_outplace( - @Const @ByRef Tensor to_expand1, - @Const @ByRef Tensor to_expand2, - String api_name); +// Increment needs to be acquire-release to make use_count() and +// unique() reliable. +@Namespace("c10::detail") public static native @Cast("size_t") long atomic_refcount_increment(@Cast("std::atomic*") @ByRef LongPointer refcount); +// weak_use_count() is only used for testing, so we don't need it to +// be reliable. Relaxed should be fine. +@Namespace("c10::detail") public static native @Cast("size_t") long atomic_weakcount_increment(@Cast("std::atomic*") @ByRef LongPointer weakcount); +// Both decrements need to be acquire-release for correctness. See +// e.g. std::shared_ptr implementation. 
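The atomic_refcount_* / atomic_weakcount_* helpers bound in this hunk implement the ordering rules these comments describe. As a plain-Java analogue only (a sketch, not the bindings): java.util.concurrent.atomic.AtomicLong read-modify-write operations have volatile semantics, at least as strong as the acquire-release ordering required here, so the conservative equivalent looks like this:

    import java.util.concurrent.atomic.AtomicLong;

    final class RefCountSketch {
        private final AtomicLong refcount = new AtomicLong(1); // creator holds one reference

        long increment() { return refcount.incrementAndGet(); }

        // Returns the new count; a result of 0 tells the caller to release
        // the object, mirroring atomic_refcount_decrement().
        long decrement() { return refcount.decrementAndGet(); }
    }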
+@Namespace("c10::detail") public static native @Cast("size_t") long atomic_refcount_decrement(@Cast("std::atomic*") @ByRef LongPointer refcount); +@Namespace("c10::detail") public static native @Cast("size_t") long atomic_weakcount_decrement(@Cast("std::atomic*") @ByRef LongPointer weakcount); -@Namespace("at") public static native @ByVal TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple expand_outplace( - @Const @ByRef Tensor to_expand1, - @Const @ByRef Tensor to_expand2, - @Const @ByRef Tensor to_expand3); +// Targeting ../TuplePtr.java +// Targeting ../FuturePtr.java +// Targeting ../ConstantStringPtr.java +// Targeting ../GeneratorImplPtr.java +// Targeting ../QuantizerPtr.java -@Namespace("at") public static native @ByVal TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple expand_outplace( - @Const @ByRef Tensor to_expand1, - @Const @ByRef Tensor to_expand2, - @Const @ByRef Tensor to_expand3, - @Cast("const char*") BytePointer api_name); -@Namespace("at") public static native @ByVal TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple expand_outplace( - @Const @ByRef Tensor to_expand1, - @Const @ByRef Tensor to_expand2, - @Const @ByRef Tensor to_expand3, - String api_name); +// Targeting ../AwaitPtr.java +// Targeting ../RRefInterfacePtr.java +// Targeting ../PyObjectHolderPtr.java +// Targeting ../EnumHolderPtr.java -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( - @Const @ByRef Tensor to_expand, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes); -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( - @Const @ByRef Tensor to_expand, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); +// Targeting ../TensorImplPtr.java +// Targeting ../TreeRef.java -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( - @Const @ByRef Tensor to_expand, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @Cast("const char*") BytePointer api_name); -@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( - @Const @ByRef Tensor to_expand, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - String api_name); +// Targeting ../StorageImplPtr.java -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector expand_outplace(@ByVal TensorArrayRef to_expand); +// Targeting ../SymNode.java -@Namespace("at") public static native @ByVal Tensor sum_to( - @ByVal Tensor tensor, - @Const @ByVal SymIntRef shape, - @Cast("bool") boolean always_return_non_view/*=false*/); -@Namespace("at") public static native @ByVal Tensor sum_to( - @ByVal Tensor tensor, - @Const @ByVal SymIntRef shape); -// Sums `tensor` repeatedly to produce a tensor of shape `shape`. 
-// Precondition: is_expandable_to(shape, tensor.sizes()) must be true -@Namespace("at") public static native @ByVal Tensor sum_to( - @ByVal Tensor tensor, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape, - @Cast("bool") boolean always_return_non_view/*=false*/); -@Namespace("at") public static native @ByVal Tensor sum_to( - @ByVal Tensor tensor, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape); -@Namespace("at") public static native @ByVal Tensor sum_to( - @ByVal Tensor tensor, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, - @Cast("bool") boolean always_return_non_view/*=false*/); -@Namespace("at") public static native @ByVal Tensor sum_to( - @ByVal Tensor tensor, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape); +// Targeting ../WeakStorage.java -@Namespace("at") public static native @Cast("bool") boolean is_expandable_to( - @ByVal SymIntRef shape, - @ByVal SymIntRef desired); -@Namespace("at") public static native @Cast("bool") boolean is_expandable_to(@ByVal @Cast("c10::ArrayRef*") LongArrayRef shape, @ByVal @Cast("c10::ArrayRef*") LongArrayRef desired); -@Namespace("at") public static native @Cast("bool") boolean is_expandable_to(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... desired); - // namespace at +// To allow weak_intrusive_ptr inside std::map or std::set, we need operator< +// Alias for documentary purposes, to more easily distinguish +// weak raw intrusive pointers from intrusive pointers. -// Parsed from ATen/Functions.h +// This namespace provides some methods for working with +// raw pointers that subclass intrusive_ptr_target. They are not provided +// as methods on intrusive_ptr_target, because ideally you would not need these +// methods at all (use smart pointers), but if you are dealing with legacy code +// that still needs to pass around raw pointers, you may find these quite +// useful. +// +// An important usage note: some functions are only valid if you have a +// strong raw pointer to the object, while others are only valid if you +// have a weak raw pointer to the object. ONLY call intrusive_ptr namespace +// functions on strong pointers, and weak_intrusive_ptr namespace functions +// on weak pointers. If you mix it up, you may get an assert failure. -// #pragma once +// WARNING: Unlike the reclaim() API, it is NOT valid to pass +// NullType::singleton to this function -// @generated by torchgen/gen.py from Functions.h +// WARNING: Unlike the reclaim() API, it is NOT valid to pass +// NullType::singleton to this function +@Namespace("c10::raw::intrusive_ptr") public static native void decref(@Cast("c10::intrusive_ptr_target*") Pointer self); -// #ifdef TORCH_ASSERT_NO_OPERATORS -// #error This change adds a dependency on native_functions.yaml, -// meaning the file will need to be re-compiled every time an operator -// is changed or added. Consider if your change would be better placed in -// another file, or if a more specific header might achieve the same goal. -// See NOTE: [Tensor vs. 
TensorBase] -// #endif +@Namespace("c10::raw::intrusive_ptr") public static native @Cast("size_t") long use_count(@Cast("c10::intrusive_ptr_target*") Pointer self); -// #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) -// #error This change adds a dependency on all pytorch operators, meaning the -// file will need to be re-compiled every time an operator is changed or added. -// Consider including a specific operator from and -// see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. -// #endif + // namespace intrusive_ptr -// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS] -// -// In ATen, certain generated headers files include the definitions of -// every single operator in PyTorch. Unfortunately this means every -// time an operator signature is updated or changed in -// native_functions.yaml, you (and every other PyTorch developer) need -// to recompile every source file that includes any of these headers. -// -// To break up these header dependencies, and improve incremental -// build times for all PyTorch developers. These headers are split -// into per-operator headers in the `ATen/ops` folder. This limits -// incremental builds to only changes to methods of `Tensor`, or files -// that use the specific operator being changed. With `at::sum` as an -// example, you should include -// -// // instead of ATen/Functions.h -// // instead of ATen/NativeFunctions.h -// // instead of ATen/Operators.h -// // instead of ATen/CPUFunctions.h -// -// However, even if you're careful to use this in your own code. -// `Functions.h` might be included indirectly through another header -// without you realising. To avoid this, you can add -// -// #define TORCH_ASSERT_ONLY_METHOD_OPERATORS -// -// to the top of your source file. This way any time the non-specific -// headers are included, the compiler will error out. -// -// Also, be aware that `ops` are not available in all build -// configurations (namely fb-internal) so you must guard these -// includes with `#ifdef AT_PER_OPERATOR_HEADERS`. e.g. 
-// -// #ifndef AT_PER_OPERATOR_HEADERS -// #include -// #else -// #include -// #endif +// This gives the STRONG refcount of a WEAK pointer -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace weak_intrusive_ptr + + // namespace raw + + // namespace c10 +// To allow intrusive_ptr and weak_intrusive_ptr inside std::unordered_map or +// std::unordered_set, we need std::hash + // namespace std + + +// Parsed from ATen/core/ivalue_inl.h + +// #pragma once + +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include // #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// 
#include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// 
- [several hundred removed lines of commented-out "// #include" directives elided here: the bracketed include targets were stripped when this patch was extracted and are unrecoverable]
-
-
-// Special C++ only overloads for std()-like functions (See gh-40287)
-// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
-// So, for example std(0) would select the std(unbiased=False) overload
-@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, int dim);
-@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, int dim);
-@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, int dim);
-@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, int dim);
-
-@Namespace("at") public static native @Cast("int64_t") long numel(@Const @ByRef Tensor tensor);
-
-@Namespace("at") public static native @Cast("int64_t") long size(@Const @ByRef Tensor tensor, @Cast("int64_t") long dim);
-
-@Namespace("at") public static native @Cast("int64_t") long stride(@Const @ByRef Tensor tensor, @Cast("int64_t") long dim);
-
-@Namespace("at") public static native @Cast("bool") boolean is_complex(@Const @ByRef Tensor tensor);
-
-@Namespace("at") public static native @Cast("bool") boolean is_floating_point(@Const @ByRef Tensor tensor);
-
-@Namespace("at") public static native @Cast("bool") boolean is_signed(@Const @ByRef Tensor tensor);
-
-@Namespace("at") public static native @Cast("bool") boolean is_inference(@Const @ByRef Tensor tensor);
-
-@Namespace("at") public static native @Cast("bool") boolean _is_zerotensor(@Const @ByRef Tensor tensor);
-
-@Namespace("at") public static native @Cast("bool") boolean is_conj(@Const @ByRef Tensor tensor);
-
-@Namespace("at") public static native @ByVal Tensor conj(@Const @ByRef Tensor tensor);
-
-@Namespace("at") public static native @Cast("bool") boolean is_neg(@Const @ByRef Tensor tensor);
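As a quick illustration of how these free functions surface in Java: the sketch below assumes the usual static import of the generated org.bytedeco.pytorch.global.torch class and a varargs randn overload; only calls shown in the declarations above are relied on, so treat it as an illustration rather than part of the patch.

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class StdDimExample {
        public static void main(String[] args) {
            Tensor t = randn(4, 3);            // 4x3 random tensor (varargs overload assumed)
            Tensor s = std(t, 0);              // the int overload selects std(dim=0), not std(unbiased=false)
            System.out.println(numel(t));      // 12 elements
            System.out.println(size(t, 1));    // 3
            System.out.println(stride(t, 0));  // 3 for a contiguous 4x3 tensor
        }
    }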
-
-
-
-// Parsed from ATen/NamedTensor.h
-
-// #include
-
-
-// Parsed from ATen/NestedTensorImpl.h
-
-// #pragma once
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-@Namespace("at::native") public static native @Cast("bool") boolean nested_tensor_impl_is_contiguous(@Const NestedTensorImpl nt);
-// Targeting ../NestedTensorImpl.java
-
-
-
-@Namespace("at::native") public static native NestedTensorImpl get_nested_tensor_impl_or_null(@Const @ByRef Tensor tensor);
-
-@Namespace("at::native") public static native NestedTensorImpl get_nested_tensor_impl(@Const @ByRef Tensor tensor);
-
-@Namespace("at::native") public static native @Const @ByRef Tensor get_nested_size_tensor(@Const @ByRef Tensor tensor);
-
- // namespace native
- // namespace at
-
-
-// Parsed from ATen/NamedTensorUtils.h
-
-// #pragma once
-// #include
-// #include
-// #include
-
-// #include
-// #include
-// #include
-
-@Namespace("at") public static native @Cast("bool") boolean has_names(@ByVal TensorArrayRef tensors);
-
-// Converts dim to a positional index. Errors if `dim` cannot be used to
-// refer to any dimension of tensor.
-@Namespace("at") public static native @Cast("int64_t") long dimname_to_position(@Const @ByRef Tensor tensor, @ByVal Dimname dim);
-@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector dimnames_to_positions(@Const @ByRef Tensor tensor, @ByVal DimnameArrayRef dims);
-
-// Unifies two DimnameList to produce a third. This is useful for implementing
-// the named inference rule for binary broadcasting operations like add.
-//
-// There are three main constraints:
-// 1) Check matching: Names must match positionally from the right.
-// 2) Check misaligned: If a name `n` is in `names`, then it must appear at
-//    the same index from the right in `other`.
-// 3) The output names are obtained by unifying the names individually from the
-//    right.
-@Namespace("at") public static native @StdMove DimnameVector unify_from_right(@ByVal DimnameArrayRef names, @ByVal DimnameArrayRef other, @Cast("const char*") BytePointer action/*="broadcast"*/);
-@Namespace("at") public static native @StdMove DimnameVector unify_from_right(@ByVal DimnameArrayRef names, @ByVal DimnameArrayRef other);
-@Namespace("at") public static native @StdMove DimnameVector unify_from_right(@ByVal DimnameArrayRef names, @ByVal DimnameArrayRef other, String action/*="broadcast"*/);
-
-@Namespace("at") public static native void reportNYIDimnameOverload(@Cast("const char*") BytePointer op_name);
-@Namespace("at") public static native void reportNYIDimnameOverload(String op_name);
-
-// [NOTE] Writing name inference rules
-//
-// Operators that support named tensors are either composed of operations that
-// support named tensors or implement some name inference rule. An op that
-// implements its own name inference rule generally looks like the following:
-//
-//   Tensor op(...) {
-//     perform_shape_checks(...);
-//     # (1)
-//     auto maybe_outnames = compute_outnames(...);
-//     auto result = [&]() {
-//       NoNamesGuard guard;
-//       return op_impl(...);
-//     }();
-//     # (2)
-//     propagate_names_if_nonempty(result, maybe_outnames);
-//
-// Each op has (1) a compute outnames step and (2) a propagate names step.
-//
-// compute_outnames is responsible for checking that input names match and
-// determining what the output names should be. It returns either:
-// - {} (if the input tensors are all unnamed)
-// - non-empty outnames.
-//
-// propagate_names_if_nonempty propagates the outnames if they exist to the
-// result tensors.
-//
-// The {} case is an optimization; if the user does not use named tensors they
-// pay no perf cost for it.
-
-
-// Propagates `names` to `result` if `names` is not empty.
-// `names` can be empty; see [NOTE] Writing name inference rules
-// If `names` is not empty, `names.size()` should equal `result.dim()`.
-// When in doubt, use this overload instead of the others.
-@Namespace("at::namedinference") public static native @Const @ByRef Tensor propagate_names_if_nonempty(@Const @ByRef Tensor result, @ByVal DimnameArrayRef maybe_names, @Cast("bool") boolean validate_names/*=false*/);
-@Namespace("at::namedinference") public static native @Const @ByRef Tensor propagate_names_if_nonempty(@Const @ByRef Tensor result, @ByVal DimnameArrayRef maybe_names);
-
-// Propagates `names` to `result`. Only use this if we are certain that there
-// are names to propagate (that `names` is not empty).
-@Namespace("at::namedinference") public static native @Const @ByRef Tensor propagate_names(@Const @ByRef Tensor result, @ByVal DimnameArrayRef names, @Cast("bool") boolean validate_names/*=false*/);
-@Namespace("at::namedinference") public static native @Const @ByRef Tensor propagate_names(@Const @ByRef Tensor result, @ByVal DimnameArrayRef names);
-
-// Propagates all names from src to result.
-@Namespace("at::namedinference") public static native void propagate_names(@Const @ByRef Tensor result, @Const @ByRef Tensor src);
-
-// Propagates all names except for those at the excluded_idxs.
-@Namespace("at::namedinference") public static native void propagate_names_except(@Const @ByRef Tensor result, @Const @ByRef Tensor src, @ByVal @Cast("c10::ArrayRef*") LongArrayRef excluded_idxs);
-@Namespace("at::namedinference") public static native void propagate_names_except(@Const @ByRef Tensor result, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... excluded_idxs);
-
-// Used for reduction ops that have a `keepdim` arg.
-@Namespace("at::namedinference") public static native void propagate_names_for_reduction(@Const @ByRef Tensor result, @Const @ByRef Tensor src, @ByVal @Cast("c10::ArrayRef*") LongArrayRef excluded_idxs, @Cast("bool") boolean keepdim);
-@Namespace("at::namedinference") public static native void propagate_names_for_reduction(@Const @ByRef Tensor result, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] excluded_idxs, @Cast("bool") boolean keepdim);
-
-@Namespace("at::namedinference") public static native void propagate_names_for_expand(@Const @ByRef Tensor result, @Const @ByRef Tensor self);
-
-@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_broadcast_outnames(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
-
-@Namespace("at::namedinference") public static native @StdMove DimnameVector broadcast_to_outnames(@Const @ByRef Tensor tensor, @Const @ByRef Tensor reference_tensor, @Cast("const char*") BytePointer op_name);
-@Namespace("at::namedinference") public static native @StdMove DimnameVector broadcast_to_outnames(@Const @ByRef Tensor tensor, @Const @ByRef Tensor reference_tensor, String op_name);
-
-@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_matmul_outnames(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
-
-@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_cdist_outnames(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
-
-@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_bmm_outnames(@Const @ByRef Tensor result, @Const @ByRef Tensor self, @Const @ByRef Tensor other);
-
-@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_squeeze_outnames(@Const @ByRef Tensor tensor);
-
-
-
-// TensorImpl* overloads for Legacy TH/THC code. Use these sparingly.
-
-@Namespace("at::namedinference") public static native TensorImpl propagate_names_if_nonempty(TensorImpl result, @ByVal DimnameArrayRef maybe_names, @Cast("bool") boolean validate_names/*=false*/);
-@Namespace("at::namedinference") public static native TensorImpl propagate_names_if_nonempty(TensorImpl result, @ByVal DimnameArrayRef maybe_names);
-
-@Namespace("at::namedinference") public static native TensorImpl propagate_names(TensorImpl result, @ByVal DimnameArrayRef names, @Cast("bool") boolean validate_names/*=false*/);
-@Namespace("at::namedinference") public static native TensorImpl propagate_names(TensorImpl result, @ByVal DimnameArrayRef names);
-
-@Namespace("at::namedinference") public static native void propagate_names(TensorImpl result, TensorImpl src);
-
-@Namespace("at::namedinference") public static native void propagate_names(@Const @ByRef TensorBase result, @ByVal DimnameArrayRef names, @Cast("bool") boolean validate_names/*=false*/);
-@Namespace("at::namedinference") public static native void propagate_names(@Const @ByRef TensorBase result, @ByVal DimnameArrayRef names);
-
-@Namespace("at::namedinference") public static native void propagate_names_if_nonempty(@Const @ByRef TensorBase result, @ByVal DimnameArrayRef names, @Cast("bool") boolean validate_names/*=false*/);
-@Namespace("at::namedinference") public static native void propagate_names_if_nonempty(@Const @ByRef TensorBase result, @ByVal DimnameArrayRef names);
-
-@Namespace("at::namedinference") public static native void propagate_names(@Const @ByRef TensorBase result, @Const @ByRef TensorBase src);
-
-// result = m1 @ m2 + bias
-@Namespace("at::namedinference") public static native @StdMove DimnameVector propagate_names_for_addmm(@Const @ByRef Tensor m1, @Const @ByRef Tensor m2, @Const @ByRef Tensor bias);
-
-@Namespace("at::namedinference") public static native @StdMove DimnameVector propagate_names_for_addmv(@Const @ByRef Tensor mat, @Const @ByRef Tensor vec, @Const @ByRef Tensor bias);
-
-@Namespace("at::namedinference") public static native void check_names_for_dot(TensorImpl vec1, TensorImpl vec2);
-
-@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_baddbmm_outnames(@Const @ByRef Tensor result, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Tensor bias);
-
-@Namespace("at::namedinference") public static native @Cast("bool") boolean are_names_equal(TensorImpl self, TensorImpl other);
-
- // namespace namedinference
-
- // namespace at
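The name-inference helpers above are also callable from Java. A minimal sketch of the propagate step, assuming randn and empty_like are bound with these varargs/single-Tensor signatures (if src carries no dimension names, the call is a no-op); statements shown as they would appear inside a main method:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    Tensor src = randn(2, 3);      // imagine src was given dimension names upstream
    Tensor dst = empty_like(src);  // same shape, no names yet
    propagate_names(dst, src);     // copies all names from src to dst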
-
-
-// Parsed from ATen/SavedTensorHooks.h
-
-// #pragma once
-
-// #include
-// #include
-// #include
-// #include
-// #include
-
-// #include
-// Targeting ../SavedTensorDefaultHooksTLS.java
-
-
-
-
-// Targeting ../SavedTensorDefaultHooks.java
-
-
-
-
- // namespace at
-
-
-// Parsed from ATen/ScalarOps.h
-
-// #pragma once
-
-// #include
-// #include
-
-// #ifndef AT_PER_OPERATOR_HEADERS
-// #include
-// #else
-// #include
-// #endif
-// When filling a number to 1-element CPU tensor, we want to skip
-// everything but manipulate data ptr directly.
-// Ideally this fast pass should be implemented in TensorIterator,
-// but we also want to skip compute_types which is not avoidable
-// in TensorIterator for now.
-
-@Namespace("at::detail") public static native @ByVal Tensor scalar_tensor_static(@Const @ByRef Scalar s, @ByVal ScalarTypeOptional dtype_opt, @ByVal DeviceOptional device_opt);
- // namespace detail
- // namespace at
-
-// This is in the c10 namespace because we use ADL to find the functions in it.
-
-// FIXME: this should be (and was) Scalar::toTensor, but there is currently no
-// way to implement this without going through Derived Types (which are not part
-// of core).
-@Namespace("c10") public static native @ByVal Tensor scalar_to_tensor(@Const @ByRef Scalar s, @Const @ByVal(nullValue = "c10::Device(at::kCPU)") Device device);
-@Namespace("c10") public static native @ByVal Tensor scalar_to_tensor(@Const @ByRef Scalar s);
-
- // namespace c10
-
-@Namespace("at::native") public static native @ByVal Tensor wrapped_scalar_tensor(@Const @ByRef Scalar scalar, @Const @ByVal(nullValue = "c10::Device(at::kCPU)") Device device);
-@Namespace("at::native") public static native @ByVal Tensor wrapped_scalar_tensor(@Const @ByRef Scalar scalar);
-
- // namespace native
- // namespace at
-
-
-// Parsed from ATen/SequenceNumber.h
-
-// #pragma once
-
-// #include
-// #include
-
-// A simple thread local enumeration, used to link forward and backward pass
-// ops; it is used by the autograd and observer frameworks
-
-@Namespace("at::sequence_number") public static native @Cast("uint64_t") long peek();
-@Namespace("at::sequence_number") public static native @Cast("uint64_t") long get_and_increment();
-
- // namespace sequence_number
- // namespace at
-
-
-// Parsed from ATen/TensorIndexing.h
-
-// #pragma once
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-// #ifndef AT_PER_OPERATOR_HEADERS
-// #include
-// #include
-// #else
-// #include
-// #include
-// #include
-// #include
-// #endif
-
-// #include
-
-// #include
-
-@Namespace("at::indexing") @MemberGetter public static native @Cast("const int64_t") long INDEX_MIN();
-@Namespace("at::indexing") @MemberGetter public static native @Cast("const int64_t") long INDEX_MAX();
-
-@Namespace("at::indexing") public enum TensorIndexType { None(0), Ellipsis(1), Integer(2), Boolean(3), Slice(4), Tensor(5);
-
-    public final int value;
-    private TensorIndexType(int v) { this.value = v; }
-    private TensorIndexType(TensorIndexType e) { this.value = e.value; }
-    public TensorIndexType intern() { for (TensorIndexType e : values()) if (e.value == value) return e; return this; }
-    @Override public String toString() { return intern().name(); }
-}
-
-@Namespace("at::indexing") @MemberGetter public static native @ByRef @Cast("const c10::nullopt_t*") Pointer None();
-// Targeting ../EllipsisIndexType.java
-
-
-@Namespace("at::indexing") @MemberGetter public static native @Const @ByRef EllipsisIndexType Ellipsis();
-// Targeting ../Slice.java
-
-
-
-@Namespace("at::indexing") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef Slice slice);
-// Targeting ../TensorIndex.java
-
-
-
-@Namespace("at::indexing") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef TensorIndex tensor_index);
-@Namespace("at::indexing") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef TensorIndexVector tensor_indices);
-@Namespace("at::indexing::impl") public static native @ByVal Tensor applySlice( - @Const @ByRef Tensor self, - @Cast("int64_t") long dim, - @ByVal SymInt start, - @ByVal SymInt stop, - @ByVal SymInt step, - @Cast("bool") boolean disable_slice_optimization, - @Const @ByRef Device self_device, - @Const @ByRef SymIntArrayRefOptional self_sizes); - -@Namespace("at::indexing::impl") public static native @ByVal Tensor applySelect( - @Const @ByRef Tensor self, - @Cast("int64_t") long dim, - @Cast("int64_t") long index, - @Cast("int64_t") long real_dim, - @Const @ByRef Device arg4, - @Const @ByRef SymIntArrayRefOptional self_sizes); - -@Namespace("at::indexing::impl") public static native @ByVal Tensor boolToIndexingTensorCPUOrCUDA( - @Const @ByRef Tensor self, - @Cast("bool") boolean value); - -@Namespace("at::indexing::impl") public static native @ByVal Tensor boolToIndexingTensorNonNativeDeviceType( - @Const @ByRef Tensor self, - @Cast("bool") boolean value); - -@Namespace("at::indexing::impl") public static native @ByVal Tensor boolToIndexingTensor( - @Const @ByRef Tensor self, - @Cast("bool") boolean value, - @Const @ByRef Device self_device); - -@Namespace("at::indexing::impl") public static native @ByVal Tensor scalarToTensorNonNativeDeviceType( - @Const @ByRef Scalar v, - @Const @ByRef TensorOptions options); - -@Namespace("at::indexing::impl") public static native void recordTensorIndex( - @Const @ByRef Tensor tensor, - @ByRef TensorVector outIndices, - @Cast("int64_t*") LongPointer dim_ptr); -@Namespace("at::indexing::impl") public static native void recordTensorIndex( - @Const @ByRef Tensor tensor, - @ByRef TensorVector outIndices, - @Cast("int64_t*") LongBuffer dim_ptr); -@Namespace("at::indexing::impl") public static native void recordTensorIndex( - @Const @ByRef Tensor tensor, - @ByRef TensorVector outIndices, - @Cast("int64_t*") long[] dim_ptr); - -// NOTE: Why do we mirror instead of replace the `count_specified_dimensions` -// function in torch/csrc/autograd/python_variable_indexing.cpp? It's because -// `count_specified_dimensions` is on the hot path of Python tensor multi-dim -// indexing (i.e. it's called by `applySlicing` which is called by -// `THPVariable_getitem` / `THPVariable_setitem` when handling indexing of more -// than one dimension). If we were to merge the Python/C++ -// `count_specified_dimensions` function, on the Python side we would have to -// construct a `std::vector` container to be consumed by the C++ -// `count_specified_dimensions` function, which adds 100s of nanoseconds -// overhead and is undesirable. -@Namespace("at::indexing::impl") public static native @Cast("int64_t") long count_specified_dimensions( - @Const @ByRef TensorIndexArrayRef indices); - // namespace impl - -// NOTE: Many functions below are only for consumption from Python indexing -// implementation, they include: -// -// - `Tensor scalarToTensor(...)` -// - `IntArrayRef slicePrefix1sSize(...)` -// - `void copy_to(...)` -// - `Tensor handleDimInMultiDimIndexing(...)` -// - `Tensor dispatch_index(...)` -// - `Tensor dispatch_index_put_(...)` -// - `Tensor get_item(...)` -// - `void set_item(...)` -// -// The rest of the functions are in `at::indexing::impl` namespace, signifying -// that they shouldn't be used from Python indexing implementation. 
-@Namespace("at::indexing") public static native @ByVal Tensor scalarToTensor( - @Const @ByRef Scalar v, - @Const @ByRef TensorOptions options, - @Const @ByRef Device self_device); - -// To match numpy semantics: -// As a special case for backwards compatibility, -// strip away unit dimensions from the left of 'src' -@Namespace("at::indexing") public static native @ByVal SymIntRef slicePrefix1sSize(@Const @ByRef SymIntRef sizes); - -@Namespace("at::indexing") public static native void copy_to(@Const @ByRef Tensor dst, @Const @ByRef Tensor src); - -// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor -// indexing functions from Python ] -@Namespace("at::indexing") public static native @ByVal Tensor handleDimInMultiDimIndexing( - @Const @ByRef Tensor prev_dim_result, - @Const @ByRef Tensor original_tensor, - @Const @ByRef TensorIndex index, - @Cast("int64_t*") LongPointer dim_ptr, - @Cast("int64_t*") LongPointer specified_dims_ptr, - @Cast("int64_t") long real_dim, - @ByRef TensorVector outIndices, - @Cast("bool") boolean disable_slice_optimization, - @Const @ByRef Device original_tensor_device, - @Const @ByRef SymIntArrayRefOptional prev_dim_result_sizes); -@Namespace("at::indexing") public static native @ByVal Tensor handleDimInMultiDimIndexing( - @Const @ByRef Tensor prev_dim_result, - @Const @ByRef Tensor original_tensor, - @Const @ByRef TensorIndex index, - @Cast("int64_t*") LongBuffer dim_ptr, - @Cast("int64_t*") LongBuffer specified_dims_ptr, - @Cast("int64_t") long real_dim, - @ByRef TensorVector outIndices, - @Cast("bool") boolean disable_slice_optimization, - @Const @ByRef Device original_tensor_device, - @Const @ByRef SymIntArrayRefOptional prev_dim_result_sizes); -@Namespace("at::indexing") public static native @ByVal Tensor handleDimInMultiDimIndexing( - @Const @ByRef Tensor prev_dim_result, - @Const @ByRef Tensor original_tensor, - @Const @ByRef TensorIndex index, - @Cast("int64_t*") long[] dim_ptr, - @Cast("int64_t*") long[] specified_dims_ptr, - @Cast("int64_t") long real_dim, - @ByRef TensorVector outIndices, - @Cast("bool") boolean disable_slice_optimization, - @Const @ByRef Device original_tensor_device, - @Const @ByRef SymIntArrayRefOptional prev_dim_result_sizes); -// This mirrors `applySlicing` in -// torch/csrc/autograd/python_variable_indexing.cpp -@Namespace("at::indexing::impl") public static native @ByVal Tensor applySlicing( - @Const @ByRef Tensor self, - @Const @ByRef TensorIndexArrayRef indices, - @ByRef TensorVector outIndices, - @Cast("bool") boolean disable_slice_optimization, - @Const @ByRef Device self_device, - @Const @ByRef SymIntArrayRefOptional self_sizes); - // namespace impl - -@Namespace("at::indexing") public static native @ByVal Tensor dispatch_index( - @Const @ByRef Tensor self, - @Cast({"", "std::vector"}) @StdMove TensorVector indices); - -@Namespace("at::indexing") public static native @ByVal Tensor dispatch_index_put_( - @ByRef Tensor self, - @Cast({"", "std::vector"}) @StdMove TensorVector indices, - @Const @ByRef Tensor value); - -// NOTE [ Setting `disable_slice_optimization` when calling C++ tensor indexing -// functions from Python ] -// -// Question: When should we set `disable_slice_optimization` to `true` when -// calling C++ tensor indexing functions from Python indexing code? -// -// Answer: What "slice optimization" means: when we have a slicing expression -// like `x[0:5, 0]`, where the sliced tensor was of size 5 in dimension 0, we -// would skip dispatching the actual slice call as an optimization. 
However, -// here are the cases where we DON'T want this optimization: -// -// 1. When we are doing 1-D slicing (e.g. `tensor[:]`). -// Reason: we always return a shallow copy for expressions such as -// `tensor[:]` / `tensor[...]` / `tensor[:, :]`. (Note that for `tensor[:, -// :]`, we return an alias of `tensor` by doing the following: -// ``` -// Tensor sliced = impl::applySlicing(self, indices, tensorIndices, -// disable_slice_optimization, self_device, self_sizes); if -// (tensorIndices.empty()) { -// if (sliced.is_same(self)) { -// // ensure we return a shallow copy for things like x[...] -// sliced = at::alias(sliced); -// } -// return sliced; -// } -// ```) -// 2. When we are doing JIT tracing. -// Reason: JIT tracing needs the `self.slice(...)` call to properly trace the -// slice operation. - -// This mirrors `THPVariable_getitem` in -// torch/csrc/autograd/python_variable_indexing.cpp See NOTE [ Setting -// `disable_slice_optimization` when calling C++ tensor indexing functions from -// Python ] -@Namespace("at::indexing") public static native @ByVal Tensor get_item( - @Const @ByRef Tensor self, - @Const @ByRef TensorIndexArrayRef indices, - @Cast("bool") boolean disable_slice_optimization/*=false*/); -@Namespace("at::indexing") public static native @ByVal Tensor get_item( - @Const @ByRef Tensor self, - @Const @ByRef TensorIndexArrayRef indices); - -// This mirrors `THPVariable_setitem` in -// torch/csrc/autograd/python_variable_indexing.cpp for "the assigned value is a -// Tensor" case See NOTE [ Setting `disable_slice_optimization` when calling C++ -// tensor indexing functions from Python ] -@Namespace("at::indexing") public static native void set_item( - @Const @ByRef Tensor self, - @Const @ByRef TensorIndexArrayRef indices, - @Const @ByRef Tensor value, - @Cast("bool") boolean disable_slice_optimization/*=false*/); -@Namespace("at::indexing") public static native void set_item( - @Const @ByRef Tensor self, - @Const @ByRef TensorIndexArrayRef indices, - @Const @ByRef Tensor value); - - // namespace indexing - // namespace at - - -// Parsed from ATen/TensorOperators.h - -// #pragma once - -// #include -// #include - -// #ifndef AT_PER_OPERATOR_HEADERS -// #include -// #else -// #include -// #endif - -// #include -// #include - -// #define AT_FORALL_BINARY_OPS(_) -// _(+, x.add(y), y.add(x)) -// _(*, x.mul(y), y.mul(x)) -// _(-, -// x.sub(y), -// ::at::empty_like(y, at::MemoryFormat::Preserve).fi_(x).sub_(y)) -// _(/, -// x.div(y), -// ::at::empty_like(y, at::MemoryFormat::Preserve).fi_(x).div_(y)) -// _(%, -// x.remainder(y), -// ::at::empty_like(y, at::MemoryFormat::Preserve).fi_(x).remainder_(y)) -// _(&, x.bitwise_and(y), y.bitwise_and(x)) -// _(|, x.bitwise_or(y), y.bitwise_or(x)) -// _(^, x.bitwise_xor(y), y.bitwise_xor(x)) -// _(<, x.t(y), y.gt(x)) -// _(<=, x.e(y), y.ge(x)) -// _(>, x.gt(y), y.t(x)) -// _(>=, x.ge(y), y.e(x)) -// _(==, x.eq(y), y.eq(x)) -// _(!=, x.ne(y), y.ne(x)) - -// #define DEFINE_OPERATOR(op, body, reverse_scalar_body) -// static inline Tensor operator op(const Tensor& x, const Tensor& y) { -// return body; -// } -// static inline Tensor operator op(const Tensor& x, const Scalar& y) { -// return body; -// } -// static inline Tensor operator op(const Scalar& x, const Tensor& y) { -// return reverse_scalar_body; -// } - -@Namespace("at") public static native @ByVal @Name("operator +") Tensor add(@Const @ByRef Tensor x, @Const @ByRef Tensor y); - @Namespace("at") public static native @ByVal @Name("operator +") Tensor add(@Const @ByRef Tensor x, 
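For orientation, the Python expression x[0:5, 0] discussed in the note corresponds in these bindings to Tensor member helpers such as narrow and select rather than get_item; a rough Java equivalent, assuming a varargs randn overload, would be:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    Tensor x = randn(10, 4);
    Tensor y = x.narrow(0, 0, 5).select(1, 0);  // rows 0..4, then column 0 -> shape [5]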
-
-
-// Parsed from ATen/TensorOperators.h
-
-// #pragma once
-
-// #include
-// #include
-
-// #ifndef AT_PER_OPERATOR_HEADERS
-// #include
-// #else
-// #include
-// #endif
-
-// #include
-// #include
-
-// #define AT_FORALL_BINARY_OPS(_)
-//   _(+, x.add(y), y.add(x))
-//   _(*, x.mul(y), y.mul(x))
-//   _(-,
-//     x.sub(y),
-//     ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).sub_(y))
-//   _(/,
-//     x.div(y),
-//     ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).div_(y))
-//   _(%,
-//     x.remainder(y),
-//     ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).remainder_(y))
-//   _(&, x.bitwise_and(y), y.bitwise_and(x))
-//   _(|, x.bitwise_or(y), y.bitwise_or(x))
-//   _(^, x.bitwise_xor(y), y.bitwise_xor(x))
-//   _(<, x.lt(y), y.gt(x))
-//   _(<=, x.le(y), y.ge(x))
-//   _(>, x.gt(y), y.lt(x))
-//   _(>=, x.ge(y), y.le(x))
-//   _(==, x.eq(y), y.eq(x))
-//   _(!=, x.ne(y), y.ne(x))
-
-// #define DEFINE_OPERATOR(op, body, reverse_scalar_body)
-//   static inline Tensor operator op(const Tensor& x, const Tensor& y) {
-//     return body;
-//   }
-//   static inline Tensor operator op(const Tensor& x, const Scalar& y) {
-//     return body;
-//   }
-//   static inline Tensor operator op(const Scalar& x, const Tensor& y) {
-//     return reverse_scalar_body;
-//   }
-
-@Namespace("at") public static native @ByVal @Name("operator +") Tensor add(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator +") Tensor add(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator +") Tensor add(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator *") Tensor multiply(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator *") Tensor multiply(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator *") Tensor multiply(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator -") Tensor subtract(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator -") Tensor subtract(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator -") Tensor subtract(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator /") Tensor divide(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator /") Tensor divide(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator /") Tensor divide(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator %") Tensor mod(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator %") Tensor mod(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator %") Tensor mod(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator &") Tensor and(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator &") Tensor and(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator &") Tensor and(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator |") Tensor or(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator |") Tensor or(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator |") Tensor or(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator ^") Tensor xor(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator ^") Tensor xor(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator ^") Tensor xor(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator <") Tensor lessThan(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator <") Tensor lessThan(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator <") Tensor lessThan(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator <=") Tensor lessThanEquals(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator <=") Tensor lessThanEquals(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator <=") Tensor lessThanEquals(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator >") Tensor greaterThan(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator >") Tensor greaterThan(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator >") Tensor greaterThan(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator >=") Tensor greaterThanEquals(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator >=") Tensor greaterThanEquals(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator >=") Tensor greaterThanEquals(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator ==") Tensor equals(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator ==") Tensor equals(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator ==") Tensor equals(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator !=") Tensor notEquals(@Const @ByRef Tensor x, @Const @ByRef Tensor y);
-@Namespace("at") public static native @ByVal @Name("operator !=") Tensor notEquals(@Const @ByRef Tensor x, @Const @ByRef Scalar y);
-@Namespace("at") public static native @ByVal @Name("operator !=") Tensor notEquals(@Const @ByRef Scalar x, @Const @ByRef Tensor y);
-// #undef DEFINE_OPERATOR
-// #undef AT_FORALL_BINARY_OPS
-
- // namespace at
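Because Java has no operator overloading, each C++ operator is exposed under the @Name-mapped method declared above. A small sketch, assuming a varargs ones overload and a Scalar(double) constructor; statements shown as they would appear inside a main method:

    import org.bytedeco.pytorch.Scalar;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    Tensor x = ones(2, 2);
    Tensor y = add(x, x);                     // C++: x + x
    Tensor z = multiply(y, new Scalar(3.0));  // C++: y * 3
    Tensor m = lessThan(x, y);                // C++: x < y (boolean tensor)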
-
-
-// Parsed from ATen/Version.h
-
-// #include
-
-/** Returns a detailed string describing the configuration of PyTorch. */
-@Namespace("at") public static native @StdString BytePointer show_config();
-
-@Namespace("at") public static native @StdString BytePointer get_mkl_version();
-
-@Namespace("at") public static native @StdString BytePointer get_mkldnn_version();
-
-@Namespace("at") public static native @StdString BytePointer get_openmp_version();
-
-@Namespace("at") public static native @StdString BytePointer get_cxx_flags();
-
- // namespace at
-
-
-// Parsed from ATen/WrapDimUtilsMulti.h
-
-// #pragma once
-
-// #include
-// #include
-// #include
-// #include
-// #include
-
-// This is in an extra file to work around strange interaction of
-// bitset on Windows with operator overloading
-
-@Namespace("at") @MemberGetter public static native @Cast("const size_t") long dim_bitset_size();
-
- // namespace at
-
-
-// Parsed from ATen/ops/from_blob.h
-
-// #pragma once
-// #include
-
-@Namespace("at::detail") public static native void noopDelete(Pointer arg0);
-
-
-// Targeting ../TensorMaker.java
-
-
-
-@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes);
-@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes);
-
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @Const @ByRef Deleter deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @Const @ByRef Deleter deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @ByRef @Cast("void(*)(void*)") Pointer deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @ByRef @Cast("void(*)(void*)") Pointer deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @ByRef @Cast("void(*)(void*)") long deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @ByRef @Cast("void(*)(void*)") long deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @Const @ByRef Deleter deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @Const @ByRef Deleter deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @ByRef @Cast("void(*)(void*)") Pointer deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @ByRef @Cast("void(*)(void*)") Pointer deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @ByRef @Cast("void(*)(void*)") long deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @ByRef @Cast("void(*)(void*)") long deleter);
-
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @Cast("int64_t") long storage_offset, @Const @ByRef Deleter deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @Cast("int64_t") long storage_offset, @Const @ByRef Deleter deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @Cast("int64_t") long storage_offset, @ByRef @Cast("void(*)(void*)") Pointer deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @Cast("int64_t") long storage_offset, @ByRef @Cast("void(*)(void*)") Pointer deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @Cast("int64_t") long storage_offset, @ByRef @Cast("void(*)(void*)") long deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @Cast("int64_t") long storage_offset, @ByRef @Cast("void(*)(void*)") long deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @Cast("int64_t") long storage_offset, @Const @ByRef Deleter deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @Cast("int64_t") long storage_offset, @Const @ByRef Deleter deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @Cast("int64_t") long storage_offset, @ByRef @Cast("void(*)(void*)") Pointer deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @Cast("int64_t") long storage_offset, @ByRef @Cast("void(*)(void*)") Pointer deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @Cast("int64_t") long storage_offset, @ByRef @Cast("void(*)(void*)") long deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @Cast("int64_t") long storage_offset, @ByRef @Cast("void(*)(void*)") long deleter);
-
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @Const @ByRef Deleter deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @Const @ByRef Deleter deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByRef @Cast("void(*)(void*)") Pointer deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByRef @Cast("void(*)(void*)") Pointer deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByRef @Cast("void(*)(void*)") long deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByRef @Cast("void(*)(void*)") long deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @Const @ByRef Deleter deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @Const @ByRef Deleter deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByRef @Cast("void(*)(void*)") Pointer deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByRef @Cast("void(*)(void*)") Pointer deleter);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByRef @Cast("void(*)(void*)") long deleter, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByRef @Cast("void(*)(void*)") long deleter);
-
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides);
-
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor from_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes);
-
- // namespace at
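from_blob wraps existing native memory without copying, so any org.bytedeco.javacpp.Pointer can back a tensor. A minimal sketch using the trailing varargs overload above; the default dtype is float, and the buffer must stay alive as long as the tensor does unless one of the deleter overloads is used (statements as they would appear inside a main method):

    import org.bytedeco.javacpp.FloatPointer;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    FloatPointer data = new FloatPointer(1f, 2f, 3f, 4f, 5f, 6f);
    Tensor t = from_blob(data, 2, 3);  // 2x3 view over the buffer, no copy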
-
-
-// Parsed from ATen/ops/tensor.h
-
-// #pragma once
-// #include
-// #include
-
-// These functions are defined in ATen/Utils.cpp.
-// #define TENSOR(T, S)
-//   TORCH_API Tensor tensor(ArrayRef values, const TensorOptions& options);
-//   inline Tensor tensor(
-//       std::initializer_list values, const TensorOptions& options) {
-//     return at::tensor(ArrayRef(values), options);
-//   }
-//   inline Tensor tensor(T value, const TensorOptions& options) {
-//     return at::tensor(ArrayRef(value), options);
-//   }
-//   inline Tensor tensor(ArrayRef values) {
-//     return at::tensor(std::move(values), at::dtype(k##S));
-//   }
-//   inline Tensor tensor(std::initializer_list values) {
-//     return at::tensor(ArrayRef(values));
-//   }
-//   inline Tensor tensor(T value) {
-//     return at::tensor(ArrayRef(value));
-//   }
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast("c10::ArrayRef*") ByteArrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast("c10::ArrayRef*") ByteArrayRef values);
-@Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast("c10::ArrayRef*") ShortArrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(short value, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast("c10::ArrayRef*") ShortArrayRef values);
-@Namespace("at") public static native @ByVal Tensor tensor(short value);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast("c10::ArrayRef*") IntArrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(int value, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast("c10::ArrayRef*") IntArrayRef values);
-@Namespace("at") public static native @ByVal Tensor tensor(int value);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast("c10::ArrayRef*") LongArrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast("c10::ArrayRef*") LongArrayRef values);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... values);
-@Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(float value, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values);
-@Namespace("at") public static native @ByVal Tensor tensor(float value);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(double value, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values);
-@Namespace("at") public static native @ByVal Tensor tensor(double value);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values);
-@Namespace("at") public static native @ByVal Tensor tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexrrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexrrayRef values);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexrrayRef values, @Const @ByRef TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexrrayRef values);
-// #undef TENSOR
-
- // namespace at
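A short sketch of the scalar overloads declared above; each produces a one-element tensor whose dtype follows the Java argument type (statements as they would appear inside a main method, with the usual static import assumed):

    Tensor a = tensor(42);         // one-element int tensor
    Tensor b = tensor(3.5);        // one-element double tensor
    System.out.println(numel(a));  // 1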
-
-
-// Parsed from ATen/ops/_adaptive_avg_pool2d.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size);
-@Namespace("at") public static native @ByVal Tensor _adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
-
-
-// aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _adaptive_avg_pool2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size);
-
-
-// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size);
-@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
-
-
-// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out);
-
-
-// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size);
-
-
-// aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByRef Tensor out);
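A quick sketch of the varargs overload above; _adaptive_avg_pool2d is the internal kernel behind the public adaptive_avg_pool2d op and is shown here only to illustrate the calling convention (assumes a varargs randn overload and the usual static import):

    Tensor in  = randn(1, 64, 10, 10);
    Tensor out = _adaptive_avg_pool2d(in, 7, 7);  // -> shape [1, 64, 7, 7]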
-@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_adaptive_avg_pool3d.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor -@Namespace("at") public static native @ByVal Tensor _adaptive_avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor _adaptive_avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); - - -// aten::_adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor -@Namespace("at") public static native @ByVal Tensor _adaptive_avg_pool3d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size); - - -// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); - - -// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); - - -// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size); - - -// aten::_adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByRef Tensor out); - - - - - -// Parsed from ATen/ops/_adaptive_avg_pool3d_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _adaptive_avg_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); - -// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _adaptive_avg_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_add_batch_dim.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor -@Namespace("at") public static native @ByVal Tensor _add_batch_dim(@Const @ByRef Tensor self, @Cast("int64_t") long batch_dim, @Cast("int64_t") long level); - - - - -// Parsed from ATen/ops/_add_relu.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor _add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor _add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor other); - -// aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _add_relu_(@ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor _add_relu_(@ByRef Tensor self, @Const @ByRef Tensor other); - -// aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor _add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar alpha, @ByRef Tensor out); - -// aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor _add_relu(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor _add_relu(@Const @ByRef Tensor self, @Const @ByRef Scalar other); - -// aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _add_relu_(@ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor _add_relu_(@ByRef Tensor self, @Const @ByRef Scalar other); - -// aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor _add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef Scalar alpha, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_addmm_activation.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _addmm_activation_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha, @Cast("bool") boolean use_gelu/*=false*/); -@Namespace("at") public static native @ByRef Tensor _addmm_activation_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _addmm_activation_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @Cast("bool") boolean use_gelu, @ByRef Tensor out); - -// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _addmm_activation(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha, @Cast("bool") boolean use_gelu/*=false*/); -@Namespace("at") public static native @ByVal Tensor _addmm_activation(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); - - - - -// Parsed from ATen/ops/_aminmax.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_aminmax(Tensor self) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _aminmax(@Const @ByRef Tensor self); - -// aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _aminmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple _aminmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); - -// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _aminmax_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self); -// aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _aminmax_outf(@Const @ByRef Tensor self, @ByRef Tensor out0, @ByRef Tensor out1); - -// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _aminmax_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _aminmax_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _aminmax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); - - - - -// Parsed from ATen/ops/_amp_foreach_non_finite_check_and_unscale.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) 
found_inf, Tensor inv_scale) -> () -@Namespace("at") public static native void _amp_foreach_non_finite_check_and_unscale_(@ByVal TensorArrayRef self, @ByRef Tensor found_inf, @Const @ByRef Tensor inv_scale); - -// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _amp_foreach_non_finite_check_and_unscale_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByRef Tensor found_inf, @Const @ByRef Tensor inv_scale); -// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _amp_foreach_non_finite_check_and_unscale_outf(@ByVal TensorArrayRef self, @ByRef Tensor found_inf, @Const @ByRef Tensor inv_scale, @ByVal TensorArrayRef out); - -// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out) -@Namespace("at") public static native @ByVal TensorVectorTensorTuple _amp_foreach_non_finite_check_and_unscale(@ByVal TensorArrayRef self, @Const @ByRef Tensor found_inf, @Const @ByRef Tensor inv_scale); - - - - -// Parsed from ATen/ops/_amp_update_scale.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _amp_update_scale_(@ByRef Tensor self, @ByRef Tensor growth_tracker, @Const @ByRef Tensor found_inf, double scale_growth_factor, double scale_backoff_factor, @Cast("int64_t") long growth_interval); - -// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _amp_update_scale_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByRef Tensor growth_tracker, @Const @ByRef Tensor found_inf, double scale_growth_factor, double scale_backoff_factor, @Cast("int64_t") long growth_interval); -// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!) 
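The _aminmax bindings above return both extrema in one pass as a TensorTensorTuple. A hedged sketch, assuming the randn factory from these presets and the get0()/get1() accessors that the generated tuple wrappers expose:

// assumes: import org.bytedeco.pytorch.*; import static org.bytedeco.pytorch.global.torch.*;
Tensor x = randn(2, 5);                         // assumed randn(long...) factory
TensorTensorTuple mm = _aminmax(x, 1, true);    // per-row min and max, keepdim=true
Tensor mins = mm.get0(), maxs = mm.get1();      // assumed tuple accessors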
-@Namespace("at") public static native @ByRef Tensor _amp_update_scale_outf(@Const @ByRef Tensor self, @ByRef Tensor growth_tracker, @Const @ByRef Tensor found_inf, double scale_growth_factor, double scale_backoff_factor, @Cast("int64_t") long growth_interval, @ByRef Tensor out); - -// aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out) -@Namespace("at") public static native @ByVal TensorTensorTuple _amp_update_scale(@Const @ByRef Tensor self, @Const @ByRef Tensor growth_tracker, @Const @ByRef Tensor found_inf, double scale_growth_factor, double scale_backoff_factor, @Cast("int64_t") long growth_interval); - - - - -// Parsed from ATen/ops/_assert_async.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_assert_async(Tensor self) -> () -@Namespace("at") public static native void _assert_async(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_assert_tensor_metadata.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> () -@Namespace("at") public static native void _assert_tensor_metadata(@Const @ByRef Tensor a, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional size, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native void _assert_tensor_metadata(@Const @ByRef Tensor a); -@Namespace("at") public static native void _assert_tensor_metadata(@Const @ByRef Tensor a, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); - - - - -// Parsed from ATen/ops/_autocast_to_full_precision.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - - - - - -// Parsed from ATen/ops/_autocast_to_reduced_precision.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - - - - - -// Parsed from ATen/ops/_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - - - - - -// Parsed from ATen/ops/_batch_norm_impl_index.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - 
-// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorLongTuple _batch_norm_impl_index(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double momentum, double eps, @Cast("bool") boolean cudnn_enabled); - - - - -// Parsed from ATen/ops/_batch_norm_impl_index_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _batch_norm_impl_index_backward(@Cast("int64_t") long impl_index, @Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var_transform, @Cast("bool") boolean train, double eps, @ByVal @Cast("std::array*") BoolPointer output_mask, @Const @ByRef Tensor reservedSpace); - - - - -// Parsed from ATen/ops/_cast_Byte.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cast_Byte(@Const @ByRef Tensor self, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor _cast_Byte(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_cast_Char.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cast_Char(@Const @ByRef Tensor self, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor _cast_Char(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_cast_Double.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal 
Tensor _cast_Double(@Const @ByRef Tensor self, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor _cast_Double(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_cast_Float.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cast_Float(@Const @ByRef Tensor self, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor _cast_Float(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_cast_Half.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cast_Half(@Const @ByRef Tensor self, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor _cast_Half(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_cast_Int.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cast_Int(@Const @ByRef Tensor self, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor _cast_Int(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_cast_Long.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cast_Long(@Const @ByRef Tensor self, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor _cast_Long(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_cast_Short.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cast_Short(@Const @ByRef Tensor self, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor _cast_Short(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_cdist_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, 
Tensor cdist) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cdist_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor x1, @Const @ByRef Tensor x2, double p, @Const @ByRef Tensor cdist); - -// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _cdist_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor x1, @Const @ByRef Tensor x2, double p, @Const @ByRef Tensor cdist); -// aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _cdist_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor x1, @Const @ByRef Tensor x2, double p, @Const @ByRef Tensor cdist, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_cdist_forward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cdist_forward(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2, double p, @ByVal LongOptional compute_mode); - -// aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _cdist_forward_out(@ByRef Tensor out, @Const @ByRef Tensor x1, @Const @ByRef Tensor x2, double p, @ByVal LongOptional compute_mode); -// aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _cdist_forward_outf(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2, double p, @ByVal LongOptional compute_mode, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_cholesky_solve_helper.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cholesky_solve_helper(@Const @ByRef Tensor self, @Const @ByRef Tensor A, @Cast("bool") boolean upper); - -// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _cholesky_solve_helper_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor A, @Cast("bool") boolean upper); -// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _cholesky_solve_helper_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor A, @Cast("bool") boolean upper, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_choose_qparams_per_tensor.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") LongPointer _choose_qparams_per_tensor(@Const @ByRef Tensor self, @Cast("bool") boolean reduce_range/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") LongPointer _choose_qparams_per_tensor(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_chunk_grad_outputs_efficient_attention.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_chunk_grad_outputs_efficient_attention(Tensor query, Tensor key, Tensor value, bool is_causal=False) -> bool -@Namespace("at") public static native @Cast("bool") boolean _chunk_grad_outputs_efficient_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("bool") boolean is_causal/*=false*/); -@Namespace("at") public static native @Cast("bool") boolean _chunk_grad_outputs_efficient_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value); - - - - -// Parsed from ATen/ops/_coalesce.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_coalesce(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _coalesce(@Const @ByRef Tensor self); - -// aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _coalesce_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _coalesce_outf(@Const @ByRef Tensor self, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_coalesced.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _coalesced_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean coalesced); -// aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _coalesced_outf(@Const @ByRef Tensor self, @Cast("bool") boolean coalesced, @ByRef Tensor out); - -// aten::_coalesced(Tensor self, bool coalesced) -> Tensor -@Namespace("at") public static native @ByVal Tensor _coalesced(@Const @ByRef Tensor self, @Cast("bool") boolean coalesced); - - - - -// Parsed from ATen/ops/_compute_linear_combination.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor -@Namespace("at") public static native @ByVal Tensor _compute_linear_combination(@Const @ByRef Tensor input, @Const @ByRef Tensor coefficients); - -// aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _compute_linear_combination_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor coefficients); -// aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _compute_linear_combination_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor coefficients, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_conj.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_conj(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor _conj(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_conj_copy.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_conj_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _conj_copy(@Const @ByRef Tensor self); - -// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _conj_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _conj_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_conj_physical.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_conj_physical(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _conj_physical(@Const @ByRef Tensor self); - -// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _conj_physical_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _conj_physical_outf(@Const @ByRef Tensor self, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_conv_depthwise2d.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor _conv_depthwise2d_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @Const @ByRef Tensor _conv_depthwise2d_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); - - -// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor _conv_depthwise2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor _conv_depthwise2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Const @ByRef Tensor out); - - -// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @Const @ByRef Tensor _conv_depthwise2d_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @Const @ByRef Tensor _conv_depthwise2d_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); - - -// aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor _conv_depthwise2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor _conv_depthwise2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Const @ByRef Tensor out); - - -// aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor -@Namespace("at") public static native @ByVal Tensor _conv_depthwise2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor _conv_depthwise2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); - - -// aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias, int[2] stride, SymInt[2] padding, int[2] dilation) -> Tensor -@Namespace("at") public static native @ByVal Tensor _conv_depthwise2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor _conv_depthwise2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); - - - - - -// Parsed from ATen/ops/_convert_indices_from_coo_to_csr.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _convert_indices_from_coo_to_csr(@Const @ByRef Tensor self, @Cast("int64_t") long size, @Cast("bool") boolean out_int32/*=false*/); -@Namespace("at") public static native @ByVal Tensor _convert_indices_from_coo_to_csr(@Const @ByRef Tensor self, @Cast("int64_t") long size); - -// aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _convert_indices_from_coo_to_csr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long size, @Cast("bool") boolean out_int32/*=false*/); -@Namespace("at") public static native @ByRef Tensor _convert_indices_from_coo_to_csr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long size); -// aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _convert_indices_from_coo_to_csr_outf(@Const @ByRef Tensor self, @Cast("int64_t") long size, @Cast("bool") boolean out_int32, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_convert_indices_from_csr_to_coo.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _convert_indices_from_csr_to_coo(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean transpose/*=false*/); -@Namespace("at") public static native @ByVal Tensor _convert_indices_from_csr_to_coo(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices); - -// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!) 
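A sketch of the COO-to-CSR index conversion above: given the sorted row index of each nonzero and the row count, the op returns compressed row pointers of length size + 1. The zeros factory is assumed; _cast_Long is the binding shown earlier in this section.

// assumes: import org.bytedeco.pytorch.*; import static org.bytedeco.pytorch.global.torch.*;
Tensor rowIndices = _cast_Long(zeros(4));       // assumed zeros(long...) factory; four nonzeros, all in row 0
Tensor crow = _convert_indices_from_coo_to_csr(rowIndices, 4, false); // out_int32=false keeps int64 output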
-@Namespace("at") public static native @ByRef Tensor _convert_indices_from_csr_to_coo_out(@ByRef Tensor out, @Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean transpose/*=false*/); -@Namespace("at") public static native @ByRef Tensor _convert_indices_from_csr_to_coo_out(@ByRef Tensor out, @Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices); -// aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _convert_indices_from_csr_to_coo_outf(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Cast("bool") boolean out_int32, @Cast("bool") boolean transpose, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_convolution.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor -@Namespace("at") public static native @ByVal Tensor _convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByVal Tensor _convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32); - - -// aten::_convolution(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor -@Namespace("at") public static native @ByVal Tensor _convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByVal Tensor _convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32); - - -// aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor -@Namespace("at") public static native @ByVal Tensor _convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled); -@Namespace("at") public static native @ByVal Tensor _convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled); - -// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByRef Tensor _convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32); - - -// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); - - -// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByRef Tensor _convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32); - - -// aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean cudnn_enabled, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); - - - - - -// Parsed from ATen/ops/_convolution_double_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? 
ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _convolution_double_backward(@Const @ByRef TensorOptional ggI, @Const @ByRef TensorOptional ggW, @Const @ByRef TensorOptional ggb, @Const @ByRef Tensor gO, @Const @ByRef Tensor weight, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _convolution_double_backward(@Const @ByRef TensorOptional ggI, @Const @ByRef TensorOptional ggW, @Const @ByRef TensorOptional ggb, @Const @ByRef Tensor gO, @Const @ByRef Tensor weight, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _convolution_double_backward_symint(@Const @ByRef TensorOptional ggI, @Const @ByRef TensorOptional ggW, @Const @ByRef TensorOptional ggb, @Const @ByRef Tensor gO, @Const @ByRef Tensor weight, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _convolution_double_backward_symint(@Const @ByRef TensorOptional ggI, @Const @ByRef TensorOptional ggW, @Const @ByRef TensorOptional ggb, @Const @ByRef Tensor gO, @Const @ByRef Tensor weight, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); - - - - - -// Parsed from ATen/ops/_convolution_mode.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_convolution_mode(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor _convolution_mode(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor _convolution_mode(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); - - - - -// Parsed from ATen/ops/_copy_from.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _copy_from(@Const @ByRef Tensor self, @Const @ByRef Tensor dst, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor _copy_from(@Const @ByRef Tensor self, @Const @ByRef Tensor dst); - -// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _copy_from_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor dst, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByRef Tensor _copy_from_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor dst); -// aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _copy_from_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor dst, @Cast("bool") boolean non_blocking, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_copy_from_and_resize.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor -@Namespace("at") public static native @ByVal Tensor _copy_from_and_resize(@Const @ByRef Tensor self, @Const @ByRef Tensor dst); - -// aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _copy_from_and_resize_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor dst); -// aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _copy_from_and_resize_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor dst, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_ctc_loss.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple _ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths); -@Namespace("at") public static native @ByVal TensorTensorTuple _ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple _ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... target_lengths); - -// aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple _ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths); - -// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _ctc_loss_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _ctc_loss_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _ctc_loss_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _ctc_loss_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... target_lengths); -// aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _ctc_loss_outf(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Cast("int64_t") long blank, @Cast("bool") boolean zero_infinity, @ByRef Tensor out0, @ByRef Tensor out1); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _ctc_loss_outf(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Cast("int64_t") long blank, @Cast("bool") boolean zero_infinity, @ByRef Tensor out0, @ByRef Tensor out1); - -// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _ctc_loss_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _ctc_loss_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths); -// aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _ctc_loss_outf(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths, @Cast("int64_t") long blank, @Cast("bool") boolean zero_infinity, @ByRef Tensor out0, @ByRef Tensor out1); - - - - -// Parsed from ATen/ops/_ctc_loss_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _ctc_loss_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal Tensor _ctc_loss_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank); -@Namespace("at") public static native @ByVal Tensor _ctc_loss_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal Tensor _ctc_loss_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank); - -// aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor 
log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _ctc_loss_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal Tensor _ctc_loss_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank); - -// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _ctc_loss_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByRef Tensor _ctc_loss_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank); -@Namespace("at") public static native @ByRef Tensor _ctc_loss_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByRef Tensor _ctc_loss_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank); -// aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _ctc_loss_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank, @Cast("bool") boolean zero_infinity, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _ctc_loss_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Const @ByRef Tensor neg_log_likelihood, @Const @ByRef Tensor log_alpha, @Cast("int64_t") long blank, @Cast("bool") boolean zero_infinity, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_cudnn_ctc_loss.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _cudnn_ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Cast("int64_t") long blank, @Cast("bool") boolean deterministic, @Cast("bool") boolean zero_infinity); -@Namespace("at") public static native @ByVal TensorTensorTuple _cudnn_ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Cast("int64_t") long blank, @Cast("bool") boolean deterministic, @Cast("bool") boolean zero_infinity); - -// aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _cudnn_ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths, @Cast("int64_t") long blank, @Cast("bool") boolean deterministic, @Cast("bool") boolean zero_infinity); - -// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _cudnn_ctc_loss_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Cast("int64_t") long blank, @Cast("bool") boolean deterministic, @Cast("bool") boolean zero_infinity); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _cudnn_ctc_loss_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Cast("int64_t") long blank, @Cast("bool") boolean deterministic, @Cast("bool") boolean zero_infinity); -// aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _cudnn_ctc_loss_outf(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Cast("int64_t") long blank, @Cast("bool") boolean deterministic, @Cast("bool") boolean zero_infinity, @ByRef Tensor out0, @ByRef Tensor out1); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _cudnn_ctc_loss_outf(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Cast("int64_t") long blank, @Cast("bool") boolean deterministic, @Cast("bool") boolean zero_infinity, @ByRef Tensor out0, @ByRef Tensor out1); - - - - -// Parsed from ATen/ops/_cudnn_init_dropout_state.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cudnn_init_dropout_state(double dropout, @Cast("bool") boolean train, @Cast("int64_t") long dropout_seed, @ByVal TensorOptions options); -// aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cudnn_init_dropout_state(double dropout, @Cast("bool") boolean train, @Cast("int64_t") long dropout_seed, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - -// aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _cudnn_init_dropout_state_out(@ByRef Tensor out, double dropout, @Cast("bool") boolean train, @Cast("int64_t") long dropout_seed); -// aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _cudnn_init_dropout_state_outf(double dropout, @Cast("bool") boolean train, @Cast("int64_t") long dropout_seed, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_cudnn_rnn.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTuple _cudnn_rnn(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef TensorOptional weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state); -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTuple _cudnn_rnn(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef TensorOptional weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state); - - -// aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTuple _cudnn_rnn_symint(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef TensorOptional weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @ByVal SymInt hidden_size, @ByVal SymInt proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal SymIntRef batch_sizes, @Const @ByRef TensorOptional dropout_state); - - -// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? 
cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _cudnn_rnn_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef TensorOptional weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _cudnn_rnn_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef TensorOptional weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state); - - -// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) 
out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _cudnn_rnn_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef TensorOptional weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _cudnn_rnn_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef TensorOptional weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); - - -// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _cudnn_rnn_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef TensorOptional weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @ByVal SymInt hidden_size, @ByVal SymInt proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal SymIntRef batch_sizes, @Const @ByRef TensorOptional dropout_state); - - -// aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) 
out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _cudnn_rnn_symint_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef TensorOptional weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @ByVal SymInt hidden_size, @ByVal SymInt proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal SymIntRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); - - - - - -// Parsed from ATen/ops/_cudnn_rnn_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorVectorTuple _cudnn_rnn_backward(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal TensorTensorTensorTensorVectorTuple _cudnn_rnn_backward(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? 
grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorVectorTuple _cudnn_rnn_backward_symint(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @ByVal SymInt hidden_size, @ByVal SymInt proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal SymIntRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> () -@Namespace("at") public static native void _cudnn_rnn_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native void _cudnn_rnn_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor 
weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> () -@Namespace("at") public static native void _cudnn_rnn_backward_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3); -@Namespace("at") public static native void _cudnn_rnn_backward_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3); - - -// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2, Tensor(d!)[] out3) -> () -@Namespace("at") public static native void _cudnn_rnn_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @ByVal SymInt hidden_size, @ByVal SymInt proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal SymIntRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> () -@Namespace("at") public static native void _cudnn_rnn_backward_symint_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @ByVal SymInt hidden_size, @ByVal SymInt proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal SymIntRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3); - - - - - -// Parsed from ATen/ops/_cudnn_rnn_flatten_weight.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cudnn_rnn_flatten_weight(@ByVal TensorArrayRef weight_arr, @Cast("int64_t") long weight_stride0, @Cast("int64_t") long input_size, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, @Cast("bool") boolean bidirectional); - - -// aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor -@Namespace("at") public static native @ByVal Tensor _cudnn_rnn_flatten_weight_symint(@ByVal TensorArrayRef weight_arr, @Cast("int64_t") 
long weight_stride0, @ByVal SymInt input_size, @Cast("int64_t") long mode, @ByVal SymInt hidden_size, @ByVal SymInt proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, @Cast("bool") boolean bidirectional); - - -// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _cudnn_rnn_flatten_weight_out(@ByRef Tensor out, @ByVal TensorArrayRef weight_arr, @Cast("int64_t") long weight_stride0, @Cast("int64_t") long input_size, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, @Cast("bool") boolean bidirectional); - - -// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _cudnn_rnn_flatten_weight_outf(@ByVal TensorArrayRef weight_arr, @Cast("int64_t") long weight_stride0, @Cast("int64_t") long input_size, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, @Cast("bool") boolean bidirectional, @ByRef Tensor out); - - -// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _cudnn_rnn_flatten_weight_symint_out(@ByRef Tensor out, @ByVal TensorArrayRef weight_arr, @Cast("int64_t") long weight_stride0, @ByVal SymInt input_size, @Cast("int64_t") long mode, @ByVal SymInt hidden_size, @ByVal SymInt proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, @Cast("bool") boolean bidirectional); - - -// aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _cudnn_rnn_flatten_weight_symint_outf(@ByVal TensorArrayRef weight_arr, @Cast("int64_t") long weight_stride0, @ByVal SymInt input_size, @Cast("int64_t") long mode, @ByVal SymInt hidden_size, @ByVal SymInt proj_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, @Cast("bool") boolean bidirectional, @ByRef Tensor out); - - - - - -// Parsed from ATen/ops/_cufft_clear_plan_cache.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cufft_clear_plan_cache(int device_index) -> () -@Namespace("at") public static native void _cufft_clear_plan_cache(@Cast("int64_t") long device_index); - - - - -// Parsed from ATen/ops/_cufft_get_plan_cache_max_size.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cufft_get_plan_cache_max_size(int device_index) -> int -@Namespace("at") public static native @Cast("int64_t") long _cufft_get_plan_cache_max_size(@Cast("int64_t") long device_index); - - - - -// Parsed from ATen/ops/_cufft_get_plan_cache_size.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cufft_get_plan_cache_size(int device_index) -> int -@Namespace("at") public static native @Cast("int64_t") long _cufft_get_plan_cache_size(@Cast("int64_t") long device_index); - - - - -// Parsed from ATen/ops/_cufft_set_plan_cache_max_size.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> () -@Namespace("at") public static native void _cufft_set_plan_cache_max_size(@Cast("int64_t") long device_index, @Cast("int64_t") long max_size); - - - - -// Parsed from ATen/ops/_cummax_helper.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () -@Namespace("at") public static native void _cummax_helper(@Const @ByRef Tensor self, @ByRef Tensor values, @ByRef Tensor indices, @Cast("int64_t") long dim); - - - - -// Parsed from ATen/ops/_cummin_helper.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) 
indices, int dim) -> () -@Namespace("at") public static native void _cummin_helper(@Const @ByRef Tensor self, @ByRef Tensor values, @ByRef Tensor indices, @Cast("int64_t") long dim); - - - - -// Parsed from ATen/ops/_debug_has_internal_overlap.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_debug_has_internal_overlap(Tensor self) -> int -@Namespace("at") public static native @Cast("int64_t") long _debug_has_internal_overlap(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_dimI.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - - - - - -// Parsed from ATen/ops/_dimV.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - - - - - -// Parsed from ATen/ops/_dim_arange.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_dim_arange(Tensor like, int dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor _dim_arange(@Const @ByRef Tensor like, @Cast("int64_t") long dim); - - - - -// Parsed from ATen/ops/_dirichlet_grad.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor -@Namespace("at") public static native @ByVal Tensor _dirichlet_grad(@Const @ByRef Tensor x, @Const @ByRef Tensor alpha, @Const @ByRef Tensor total); - -// aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _dirichlet_grad_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor alpha, @Const @ByRef Tensor total); -// aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _dirichlet_grad_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor alpha, @Const @ByRef Tensor total, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_efficient_attention_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _efficient_attention_backward(@Const @ByRef Tensor grad_out_, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef Tensor out, @Const @ByRef Tensor logsumexp, @Cast("bool") boolean is_causal/*=false*/, @Cast("bool") boolean chunk_grad_outputs/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _efficient_attention_backward(@Const @ByRef Tensor grad_out_, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef Tensor out, @Const @ByRef Tensor logsumexp); - - - - -// Parsed from ATen/ops/_efficient_attention_forward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_efficient_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, int? max_seqlen_q, bool compute_log_sumexp=False, bool causal=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _efficient_attention_forward(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef TensorOptional cu_seqlens_q, @Const @ByRef TensorOptional cu_seqlens_k, @ByVal LongOptional max_seqlen_q, @Cast("bool") boolean compute_log_sumexp/*=false*/, @Cast("bool") boolean causal/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple _efficient_attention_forward(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef TensorOptional cu_seqlens_q, @Const @ByRef TensorOptional cu_seqlens_k, @ByVal LongOptional max_seqlen_q); - - - - -// Parsed from ATen/ops/_efficientzerotensor.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _efficientzerotensor(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _efficientzerotensor(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _efficientzerotensor(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor _efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - -// aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _efficientzerotensor_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor _efficientzerotensor_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _efficientzerotensor_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _efficientzerotensor_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_embedding_bag.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
-// Parsed from ATen/ops/_embedding_bag.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
-@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple _embedding_bag(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("int64_t") long mode/*=0*/, @Cast("bool") boolean sparse/*=false*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional per_sample_weights, @Cast("bool") boolean include_last_offset/*=false*/, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple _embedding_bag(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets);
-
-// aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _embedding_bag_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("int64_t") long mode/*=0*/, @Cast("bool") boolean sparse/*=false*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional per_sample_weights, @Cast("bool") boolean include_last_offset/*=false*/, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _embedding_bag_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets);
-// aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _embedding_bag_outf(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Cast("bool") boolean sparse, @Const @ByRef TensorOptional per_sample_weights, @Cast("bool") boolean include_last_offset, @Cast("int64_t") long padding_idx, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3);
-
-
-
-
-// Parsed from ATen/ops/_embedding_bag_backward.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @Cast("int64_t") long num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Cast("bool") boolean sparse, @Const @ByRef TensorOptional per_sample_weights, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @Cast("int64_t") long num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Cast("bool") boolean sparse, @Const @ByRef TensorOptional per_sample_weights);
-
-
-// aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_backward_symint(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @ByVal SymInt num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Cast("bool") boolean sparse, @Const @ByRef TensorOptional per_sample_weights, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_backward_symint(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @ByVal SymInt num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Cast("bool") boolean sparse, @Const @ByRef TensorOptional per_sample_weights);
-
-
-
-
-
-// Parsed from ATen/ops/_embedding_bag_dense_backward.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_dense_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @Cast("int64_t") long num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_dense_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @Cast("int64_t") long num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights);
-
-
-// aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_dense_backward_symint(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @ByVal SymInt num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_dense_backward_symint(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @ByVal SymInt num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights);
-
-
-// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _embedding_bag_dense_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @Cast("int64_t") long num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByRef Tensor _embedding_bag_dense_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @Cast("int64_t") long num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights);
-
-
-// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _embedding_bag_dense_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @Cast("int64_t") long num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights, @Cast("int64_t") long padding_idx, @ByRef Tensor out);
-
-
-// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _embedding_bag_dense_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @ByVal SymInt num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByRef Tensor _embedding_bag_dense_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @ByVal SymInt num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights);
-
-
-// aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _embedding_bag_dense_backward_symint_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Const @ByRef Tensor maximum_indices, @ByVal SymInt num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights, @Cast("int64_t") long padding_idx, @ByRef Tensor out);
-
-
-
-
-
-// Parsed from ATen/ops/_embedding_bag_forward_only.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)
-@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple _embedding_bag_forward_only(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("int64_t") long mode/*=0*/, @Cast("bool") boolean sparse/*=false*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional per_sample_weights, @Cast("bool") boolean include_last_offset/*=false*/, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple _embedding_bag_forward_only(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets);
-
-// aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _embedding_bag_forward_only_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("int64_t") long mode/*=0*/, @Cast("bool") boolean sparse/*=false*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional per_sample_weights, @Cast("bool") boolean include_last_offset/*=false*/, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _embedding_bag_forward_only_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets);
-// aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _embedding_bag_forward_only_outf(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Cast("bool") boolean sparse, @Const @ByRef TensorOptional per_sample_weights, @Cast("bool") boolean include_last_offset, @Cast("int64_t") long padding_idx, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3);
-
-
-
-
-// Parsed from ATen/ops/_embedding_bag_per_sample_weights_backward.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_per_sample_weights_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Cast("int64_t") long mode, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_per_sample_weights_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Cast("int64_t") long mode);
-
-// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _embedding_bag_per_sample_weights_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Cast("int64_t") long mode, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByRef Tensor _embedding_bag_per_sample_weights_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Cast("int64_t") long mode);
-// aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _embedding_bag_per_sample_weights_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Cast("int64_t") long mode, @Cast("int64_t") long padding_idx, @ByRef Tensor out);
-
-
-
-
-// Parsed from ATen/ops/_embedding_bag_sparse_backward.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_sparse_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Cast("int64_t") long num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_sparse_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @Cast("int64_t") long num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights);
-
-
-// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_sparse_backward_symint(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @ByVal SymInt num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights, @Cast("int64_t") long padding_idx/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor _embedding_bag_sparse_backward_symint(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Const @ByRef Tensor offset2bag, @Const @ByRef Tensor bag_size, @ByVal SymInt num_weights, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Const @ByRef TensorOptional per_sample_weights);
-
-
-
-
-
-// Parsed from ATen/ops/_empty_affine_quantized.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _empty_affine_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByVal Tensor _empty_affine_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
-@Namespace("at") public static native @ByVal Tensor _empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByVal Tensor _empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
-// aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _empty_affine_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, double scale, @Cast("int64_t") long zero_point, @ByVal MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByVal Tensor _empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, double scale, @Cast("int64_t") long zero_point, @ByVal MemoryFormatOptional memory_format);
-
-// aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _empty_affine_quantized_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByRef Tensor _empty_affine_quantized_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
-@Namespace("at") public static native @ByRef Tensor _empty_affine_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByRef Tensor _empty_affine_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
-// aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _empty_affine_quantized_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, double scale, @Cast("int64_t") long zero_point, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor _empty_affine_quantized_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, double scale, @Cast("int64_t") long zero_point, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out);
-
-
-
-
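The quantized factory follows the same overload pattern; `scale` and `zero_point` default to 1 and 0, and a quantized dtype would normally be supplied through `TensorOptions`. A sketch with the same imports as the earlier example, shown only to illustrate the call shape:

    // hypothetical call shape; a real call would set a quantized ScalarType
    // (e.g. qint8) on the TensorOptions overload
    Tensor q = _empty_affine_quantized(4, 4);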
-// Parsed from ATen/ops/_empty_per_channel_affine_quantized.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _empty_per_channel_affine_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByVal Tensor _empty_per_channel_affine_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis);
-@Namespace("at") public static native @ByVal Tensor _empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByVal Tensor _empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis);
-// aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _empty_per_channel_affine_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByVal Tensor _empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format);
-
-// aten::_empty_per_channel_affine_quantized.out(int[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _empty_per_channel_affine_quantized_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByRef Tensor _empty_per_channel_affine_quantized_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis);
-@Namespace("at") public static native @ByRef Tensor _empty_per_channel_affine_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByRef Tensor _empty_per_channel_affine_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis);
-// aten::_empty_per_channel_affine_quantized.out(int[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _empty_per_channel_affine_quantized_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor _empty_per_channel_affine_quantized_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out);
-
-
-
-
-// Parsed from ATen/ops/_euclidean_dist.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _euclidean_dist(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2);
-
-// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _euclidean_dist_out(@ByRef Tensor out, @Const @ByRef Tensor x1, @Const @ByRef Tensor x2);
-// aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _euclidean_dist_outf(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2, @ByRef Tensor out);
-
-
-
-
-@Namespace("at") public static native @ByRef Tensor _fake_quantize_learnable_per_channel_affine_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, double grad_factor, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_fake_quantize_learnable_per_channel_affine_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _fake_quantize_learnable_per_channel_affine_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, double grad_factor/*=1.0*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _fake_quantize_learnable_per_channel_affine_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); - - - - -// Parsed from ATen/ops/_fake_quantize_learnable_per_tensor_affine.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor -@Namespace("at") public static native @ByVal Tensor _fake_quantize_learnable_per_tensor_affine(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, double grad_factor/*=1.0*/); -@Namespace("at") public static native @ByVal Tensor _fake_quantize_learnable_per_tensor_affine(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); - -// aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _fake_quantize_learnable_per_tensor_affine_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, double grad_factor/*=1.0*/); -@Namespace("at") public static native @ByRef Tensor _fake_quantize_learnable_per_tensor_affine_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); -// aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _fake_quantize_learnable_per_tensor_affine_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, double grad_factor, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _fake_quantize_learnable_per_tensor_affine_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, double grad_factor/*=1.0*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _fake_quantize_learnable_per_tensor_affine_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); - - - - -// Parsed from ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask) -@Namespace("at") public static native @ByVal TensorTensorTuple _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Const @ByRef Tensor fake_quant_enabled, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); - -// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Const @ByRef Tensor fake_quant_enabled, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); -// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) 
-// Parsed from ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
-@Namespace("at") public static native @ByVal TensorTensorTuple _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Const @ByRef Tensor fake_quant_enabled, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max);
-
-// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Const @ByRef Tensor fake_quant_enabled, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max);
-// aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Const @ByRef Tensor fake_quant_enabled, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @ByRef Tensor out0, @ByRef Tensor out1);
-
-
-
-
-// Parsed from ATen/ops/_fft_c2c.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _fft_c2c(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long normalization, @Cast("bool") boolean forward);
-@Namespace("at") public static native @ByVal Tensor _fft_c2c(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long normalization, @Cast("bool") boolean forward);
-
-
-// aten::_fft_c2c(Tensor self, SymInt[] dim, int normalization, bool forward) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _fft_c2c_symint(@Const @ByRef Tensor self, @ByVal SymIntRef dim, @Cast("int64_t") long normalization, @Cast("bool") boolean forward);
-
-
-// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _fft_c2c_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long normalization, @Cast("bool") boolean forward);
-@Namespace("at") public static native @ByRef Tensor _fft_c2c_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long normalization, @Cast("bool") boolean forward);
-
-
-// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _fft_c2c_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long normalization, @Cast("bool") boolean forward, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor _fft_c2c_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long normalization, @Cast("bool") boolean forward, @ByRef Tensor out);
-
-
-// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _fft_c2c_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef dim, @Cast("int64_t") long normalization, @Cast("bool") boolean forward);
-
-
-// aten::_fft_c2c.out(Tensor self, SymInt[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _fft_c2c_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef dim, @Cast("int64_t") long normalization, @Cast("bool") boolean forward, @ByRef Tensor out);
-
-
-
-
-
-// Parsed from ATen/ops/_fft_c2r.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _fft_c2r(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long normalization, @Cast("int64_t") long last_dim_size);
-@Namespace("at") public static native @ByVal Tensor _fft_c2r(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long normalization, @Cast("int64_t") long last_dim_size);
-
-// aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _fft_c2r_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long normalization, @Cast("int64_t") long last_dim_size);
-@Namespace("at") public static native @ByRef Tensor _fft_c2r_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long normalization, @Cast("int64_t") long last_dim_size);
-// aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _fft_c2r_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long normalization, @Cast("int64_t") long last_dim_size, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor _fft_c2r_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long normalization, @Cast("int64_t") long last_dim_size, @ByRef Tensor out);
-
-
-
-
-// Parsed from ATen/ops/_fft_r2c.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _fft_r2c(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long normalization, @Cast("bool") boolean onesided);
-@Namespace("at") public static native @ByVal Tensor _fft_r2c(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long normalization, @Cast("bool") boolean onesided);
-
-// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _fft_r2c_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long normalization, @Cast("bool") boolean onesided);
-@Namespace("at") public static native @ByRef Tensor _fft_r2c_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long normalization, @Cast("bool") boolean onesided);
-// aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _fft_r2c_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long normalization, @Cast("bool") boolean onesided, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor _fft_r2c_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long normalization, @Cast("bool") boolean onesided, @ByRef Tensor out);
-
-
-
-
-// Parsed from ATen/ops/_flash_attention_backward.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor, Tensor, Tensor)
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple _flash_attention_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef Tensor out, @Const @ByRef Tensor logsumexp, @Const @ByRef Tensor cum_seq_q, @Const @ByRef Tensor cum_seq_k, @Cast("int64_t") long max_q, @Cast("int64_t") long max_k, double dropout_p, @Cast("bool") boolean is_causal, @Cast("int64_t") long philox_seed, @Cast("int64_t") long philox_offset);
-
-
-
-
-// Parsed from ATen/ops/_flash_attention_forward.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_flash_attention_forward(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, bool return_debug_mask) -> (Tensor output, Tensor softmax_logsumexp, int philox_seed, int philox_offset, Tensor debug_attn_mask)
-@Namespace("at") public static native @ByVal TensorTensorLongLongTensorTuple _flash_attention_forward(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef Tensor cum_seq_q, @Const @ByRef Tensor cum_seq_k, @Cast("int64_t") long max_q, @Cast("int64_t") long max_k, double dropout_p, @Cast("bool") boolean is_causal, @Cast("bool") boolean return_debug_mask);
-
-
-
-
-// Parsed from ATen/ops/_foobar.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _foobar(@Const @ByRef Tensor self, @Cast("bool") boolean arg1/*=true*/, @Cast("bool") boolean arg2/*=true*/, @Cast("bool") boolean arg3/*=true*/);
-@Namespace("at") public static native @ByVal Tensor _foobar(@Const @ByRef Tensor self);
-
-// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _foobar_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean arg1/*=true*/, @Cast("bool") boolean arg2/*=true*/, @Cast("bool") boolean arg3/*=true*/);
-@Namespace("at") public static native @ByRef Tensor _foobar_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _foobar_outf(@Const @ByRef Tensor self, @Cast("bool") boolean arg1, @Cast("bool") boolean arg2, @Cast("bool") boolean arg3, @ByRef Tensor out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_abs.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_abs(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_abs(@ByVal TensorArrayRef self);
-
-// aten::_foreach_abs_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_abs_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_abs_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_abs_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_acos.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_acos(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_acos(@ByVal TensorArrayRef self);
-
-// aten::_foreach_acos_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_acos_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_acos_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_acos_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_add.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_add(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
-@Namespace("at") public static native void _foreach_add_(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_add(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha);
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_add(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
-@Namespace("at") public static native void _foreach_add_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha);
-@Namespace("at") public static native void _foreach_add_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_add(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
-@Namespace("at") public static native void _foreach_add_(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_add_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_add_outf(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar, @ByVal TensorArrayRef out);
-
-// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_add_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha);
-@Namespace("at") public static native void _foreach_add_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_add_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @Const @ByRef Scalar alpha, @ByVal TensorArrayRef out);
-
-// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_add_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_add_outf(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars, @ByVal TensorArrayRef out);
-
-
-
-
- -// #include - - -// aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () -@Namespace("at") public static native void _foreach_addcdiv_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); -@Namespace("at") public static native void _foreach_addcdiv_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2); - -// aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> () -@Namespace("at") public static native void _foreach_addcdiv_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @ByVal ScalarArrayRef scalars); - -// aten::_foreach_addcdiv_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> () -@Namespace("at") public static native void _foreach_addcdiv_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef Tensor scalars); - -// aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_addcdiv(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_addcdiv(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2); - -// aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_addcdiv(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @ByVal ScalarArrayRef scalars); - -// aten::_foreach_addcdiv.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_addcdiv(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef Tensor scalars); - -// aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _foreach_addcdiv_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); -@Namespace("at") public static native void _foreach_addcdiv_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2); -// aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _foreach_addcdiv_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef Scalar value, @ByVal TensorArrayRef out); - -// aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _foreach_addcdiv_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal 
-// Parsed from ATen/ops/_foreach_addcmul.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
-@Namespace("at") public static native void _foreach_addcmul_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value);
-@Namespace("at") public static native void _foreach_addcmul_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2);
-
-// aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
-@Namespace("at") public static native void _foreach_addcmul_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_addcmul_.Tensor(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> ()
-@Namespace("at") public static native void _foreach_addcmul_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef Tensor scalars);
-
-// aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_addcmul(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value);
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_addcmul(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2);
-
-// aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_addcmul(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_addcmul.Tensor(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_addcmul(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef Tensor scalars);
-
-// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_addcmul_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value);
-@Namespace("at") public static native void _foreach_addcmul_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2);
-// aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_addcmul_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef Scalar value, @ByVal TensorArrayRef out);
-
-// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_addcmul_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @ByVal ScalarArrayRef scalars);
-// aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_addcmul_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @ByVal ScalarArrayRef scalars, @ByVal TensorArrayRef out);
-
-// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_addcmul_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef Tensor scalars);
-// aten::_foreach_addcmul.Tensor_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Tensor scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_addcmul_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensor1, @ByVal TensorArrayRef tensor2, @Const @ByRef Tensor scalars, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_asin.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_asin(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_asin(@ByVal TensorArrayRef self);
-
-// aten::_foreach_asin_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_asin_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_asin_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_asin_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_atan.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_atan(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_atan(@ByVal TensorArrayRef self);
-
-// aten::_foreach_atan_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_atan_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_atan_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_atan_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_ceil.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_ceil(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_ceil(@ByVal TensorArrayRef self);
-
-// aten::_foreach_ceil_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_ceil_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_ceil_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_ceil_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_clamp_max.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_clamp_max(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
-@Namespace("at") public static native void _foreach_clamp_max_(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_clamp_max(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
-@Namespace("at") public static native void _foreach_clamp_max_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_clamp_max(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
-@Namespace("at") public static native void _foreach_clamp_max_(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_max_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_max_outf(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar, @ByVal TensorArrayRef out);
-
-// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_max_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_max_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @ByVal TensorArrayRef out);
-
-// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_max_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_max_outf(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars, @ByVal TensorArrayRef out);
-
-
-
-
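Combined with `_foreach_clamp_min_` from the section that follows, `_foreach_clamp_max_` gives a list-wise value clamp, e.g. clipping every gradient into `[-c, c]` without a Java-side loop (hypothetical `grads` list, same construction assumptions as above):

    double c = 1.0;
    _foreach_clamp_max_(grads, new Scalar(c));   // cap values above at  c
    _foreach_clamp_min_(grads, new Scalar(-c));  // cap values below at -c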
-// Parsed from ATen/ops/_foreach_clamp_min.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_clamp_min.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_clamp_min(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_clamp_min_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
-@Namespace("at") public static native void _foreach_clamp_min_(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_clamp_min.List(Tensor[] self, Tensor[] other) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_clamp_min(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_clamp_min_.List(Tensor(a!)[] self, Tensor[] other) -> ()
-@Namespace("at") public static native void _foreach_clamp_min_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_clamp_min.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_clamp_min(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_clamp_min_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
-@Namespace("at") public static native void _foreach_clamp_min_(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_min_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-// aten::_foreach_clamp_min.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_min_outf(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar, @ByVal TensorArrayRef out);
-
-// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_min_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-// aten::_foreach_clamp_min.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_min_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @ByVal TensorArrayRef out);
-
-// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_min_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-// aten::_foreach_clamp_min.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_clamp_min_outf(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_cos.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_cos(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_cos(@ByVal TensorArrayRef self);
-
-// aten::_foreach_cos_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_cos_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_cos_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_cos_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_cosh.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_cosh(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_cosh(@ByVal TensorArrayRef self);
-
-// aten::_foreach_cosh_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_cosh_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_cosh_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_cosh_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_div.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_div(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
-@Namespace("at") public static native void _foreach_div_(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_div(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()
-@Namespace("at") public static native void _foreach_div_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_div(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
-@Namespace("at") public static native void _foreach_div_(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_div_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-// aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_div_outf(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar, @ByVal TensorArrayRef out);
-
-// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_div_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-// aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_div_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @ByVal TensorArrayRef out);
-
-// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_div_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-// aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_div_outf(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars, @ByVal TensorArrayRef out);
-
-
-
-
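The `.Scalar` form of `_foreach_div_` divides every tensor in the list by one scalar, which is e.g. the gradient-averaging step of data-parallel training. A hedged sketch with hypothetical `grads` and `worldSize`:

    // Average gradients that were summed across worldSize replicas.
    _foreach_div_(grads, new Scalar((double) worldSize));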
-// Parsed from ATen/ops/_foreach_erf.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_erf(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_erf(@ByVal TensorArrayRef self);
-
-// aten::_foreach_erf_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_erf_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_erf_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_erf_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_erfc.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_erfc(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_erfc(@ByVal TensorArrayRef self);
-
-// aten::_foreach_erfc_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_erfc_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_erfc_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_erfc_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_exp.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_exp(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_exp(@ByVal TensorArrayRef self);
-
-// aten::_foreach_exp_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_exp_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_exp_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_exp_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_expm1.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_expm1(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_expm1(@ByVal TensorArrayRef self);
-
-// aten::_foreach_expm1_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_expm1_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_expm1_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_expm1_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_floor.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_floor(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_floor(@ByVal TensorArrayRef self);
-
-// aten::_foreach_floor_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_floor_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_floor_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_floor_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_frac.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_frac(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_frac(@ByVal TensorArrayRef self);
-
-// aten::_foreach_frac_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_frac_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_frac_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_frac_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_lerp.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_lerp.List(Tensor[] self, Tensor[] tensors1, Tensor[] weights) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_lerp(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensors1, @ByVal TensorArrayRef weights);
-
-// aten::_foreach_lerp_.List(Tensor(a!)[] self, Tensor[] tensors1, Tensor[] weights) -> ()
-@Namespace("at") public static native void _foreach_lerp_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensors1, @ByVal TensorArrayRef weights);
-
-// aten::_foreach_lerp.Scalar(Tensor[] self, Tensor[] tensors1, Scalar weight) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_lerp(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensors1, @Const @ByRef Scalar weight);
-
-// aten::_foreach_lerp_.Scalar(Tensor(a!)[] self, Tensor[] tensors1, Scalar weight) -> ()
-@Namespace("at") public static native void _foreach_lerp_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensors1, @Const @ByRef Scalar weight);
-
-// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_lerp_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef tensors1, @ByVal TensorArrayRef weights);
-// aten::_foreach_lerp.List_out(Tensor[] self, Tensor[] tensors1, Tensor[] weights, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_lerp_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensors1, @ByVal TensorArrayRef weights, @ByVal TensorArrayRef out);
-
-// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_lerp_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef tensors1, @Const @ByRef Scalar weight);
-// aten::_foreach_lerp.Scalar_out(Tensor[] self, Tensor[] tensors1, Scalar weight, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_lerp_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef tensors1, @Const @ByRef Scalar weight, @ByVal TensorArrayRef out);
-
-
-
-
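Following the usual lerp definition, `_foreach_lerp_(self, tensors1, weight)` moves each `self[i]` toward `tensors1[i]` by `weight * (tensors1[i] - self[i])`, so an exponential moving average of model weights reduces to one call. A sketch with hypothetical `emaParams`, `params`, and `decay`:

    // ema = ema + (1 - decay) * (param - ema), applied list-wise.
    _foreach_lerp_(emaParams, params, new Scalar(1.0 - decay));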
-// Parsed from ATen/ops/_foreach_lgamma.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_lgamma(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_lgamma(@ByVal TensorArrayRef self);
-
-// aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_lgamma_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_lgamma_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_lgamma_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_log.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_log(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_log(@ByVal TensorArrayRef self);
-
-// aten::_foreach_log_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_log_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_log_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_log_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_log10.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_log10(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_log10(@ByVal TensorArrayRef self);
-
-// aten::_foreach_log10_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_log10_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_log10_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_log10_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_log1p.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_log1p(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_log1p(@ByVal TensorArrayRef self);
-
-// aten::_foreach_log1p_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_log1p_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_log1p_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_log1p_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_log2.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_log2(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_log2(@ByVal TensorArrayRef self);
-
-// aten::_foreach_log2_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_log2_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_log2_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_log2_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_maximum.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_maximum(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
-@Namespace("at") public static native void _foreach_maximum_(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_maximum(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
-@Namespace("at") public static native void _foreach_maximum_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_maximum(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
-@Namespace("at") public static native void _foreach_maximum_(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_maximum_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_maximum_outf(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar, @ByVal TensorArrayRef out);
-
-// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_maximum_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_maximum_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @ByVal TensorArrayRef out);
-
-// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_maximum_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_maximum_outf(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_minimum.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_minimum(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_minimum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
-@Namespace("at") public static native void _foreach_minimum_(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_minimum(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
-@Namespace("at") public static native void _foreach_minimum_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_minimum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_minimum(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_minimum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
-@Namespace("at") public static native void _foreach_minimum_(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_minimum_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-// aten::_foreach_minimum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_minimum_outf(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar, @ByVal TensorArrayRef out);
-
-// aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_minimum_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-// aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_minimum_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @ByVal TensorArrayRef out);
-
-// aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_minimum_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-// aten::_foreach_minimum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_minimum_outf(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_mul.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_mul(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
-@Namespace("at") public static native void _foreach_mul_(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_mul(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()
-@Namespace("at") public static native void _foreach_mul_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_mul(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
-@Namespace("at") public static native void _foreach_mul_(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_mul_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-// aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_mul_outf(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar, @ByVal TensorArrayRef out);
-
-// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_mul_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-// aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_mul_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @ByVal TensorArrayRef out);
-
-// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_mul_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-// aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_mul_outf(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_neg.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_neg(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_neg(@ByVal TensorArrayRef self);
-
-// aten::_foreach_neg_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_neg_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_neg_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_neg_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_norm.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_norm(@ByVal TensorArrayRef self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar ord);
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_norm(@ByVal TensorArrayRef self);
-
-// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_norm_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar ord);
-@Namespace("at") public static native void _foreach_norm_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_norm_outf(@ByVal TensorArrayRef self, @Const @ByRef Scalar ord, @ByVal TensorArrayRef out);
-
-
-
-
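`_foreach_norm` returns one scalar tensor per input (2-norm by default, `ord` otherwise), so a global gradient norm can be assembled on the Java side. A sketch that additionally assumes the presets' `Tensor.item()` and `Scalar.toDouble()` accessors:

    TensorVector norms = _foreach_norm(grads);  // per-tensor 2-norms
    double sq = 0;
    for (long i = 0; i < norms.size(); i++) {
        double n = norms.get(i).item().toDouble();
        sq += n * n;
    }
    double globalNorm = Math.sqrt(sq);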
-// Parsed from ATen/ops/_foreach_reciprocal.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_reciprocal(@ByVal TensorArrayRef self);
-
-// aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_reciprocal_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_reciprocal_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_reciprocal_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_round.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_round(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_round(@ByVal TensorArrayRef self);
-
-// aten::_foreach_round_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_round_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_round_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_round_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_sigmoid.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_sigmoid(@ByVal TensorArrayRef self);
-
-// aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_sigmoid_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sigmoid_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sigmoid_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_sin.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_sin(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_sin(@ByVal TensorArrayRef self);
-
-// aten::_foreach_sin_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_sin_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sin_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sin_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_sinh.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_sinh(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_sinh(@ByVal TensorArrayRef self);
-
-// aten::_foreach_sinh_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_sinh_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sinh_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sinh_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_sqrt.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_sqrt(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_sqrt(@ByVal TensorArrayRef self);
-
-// aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_sqrt_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sqrt_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sqrt_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
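Throughout these headers each op also comes in two out-variant spellings: `*_out` takes the preallocated `out` list first (the C++ `at::` convention), while `*_outf` keeps the schema order with `out` last. Both write into `out` and return nothing. With hypothetical `xs` and `out` lists of matching shapes:

    _foreach_sqrt_out(out, xs);   // out-first spelling
    _foreach_sqrt_outf(xs, out);  // schema-order spelling, same effect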
-// Parsed from ATen/ops/_foreach_sub.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_sub(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
-@Namespace("at") public static native void _foreach_sub_(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-
-// aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_sub(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha);
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_sub(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
-@Namespace("at") public static native void _foreach_sub_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha);
-@Namespace("at") public static native void _foreach_sub_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-
-// aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_sub(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
-@Namespace("at") public static native void _foreach_sub_(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-
-// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sub_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @Const @ByRef Scalar scalar);
-// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sub_outf(@ByVal TensorArrayRef self, @Const @ByRef Scalar scalar, @ByVal TensorArrayRef out);
-
-// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sub_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha);
-@Namespace("at") public static native void _foreach_sub_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef other);
-// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sub_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef other, @Const @ByRef Scalar alpha, @ByVal TensorArrayRef out);
-
-// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sub_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars);
-// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_sub_outf(@ByVal TensorArrayRef self, @ByVal ScalarArrayRef scalars, @ByVal TensorArrayRef out);
-
-
-
-
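Because the `.List` form of `_foreach_sub_` takes an `alpha` multiplier (`self[i] -= alpha * other[i]`), a bare SGD update over a parameter list is a single call. A sketch with hypothetical `params`, `grads`, and learning rate `lr`:

    _foreach_sub_(params, grads, new Scalar(lr));  // p -= lr * g, list-wise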
-// Parsed from ATen/ops/_foreach_tan.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_tan(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_tan(@ByVal TensorArrayRef self);
-
-// aten::_foreach_tan_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_tan_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_tan_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_tan_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_tanh.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_tanh(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_tanh(@ByVal TensorArrayRef self);
-
-// aten::_foreach_tanh_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_tanh_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_tanh_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_tanh_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_trunc.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_trunc(Tensor[] self) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_trunc(@ByVal TensorArrayRef self);
-
-// aten::_foreach_trunc_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_trunc_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_trunc_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_trunc_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-
-
-
-// Parsed from ATen/ops/_foreach_zero.h
-
-// #pragma once
-
-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-
-// #include
-
-
-// aten::_foreach_zero_(Tensor(a!)[] self) -> ()
-@Namespace("at") public static native void _foreach_zero_(@ByVal TensorArrayRef self);
-
-// aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_zero_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self);
-// aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
-@Namespace("at") public static native void _foreach_zero_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef out);
-
-// aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out
-@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _foreach_zero(@ByVal TensorArrayRef self);
-
-
-
-
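`_foreach_zero_` is the list-wise analogue of `Tensor.zero_()`, e.g. for clearing all accumulated gradients between optimizer steps (hypothetical `grads` list as before):

    _foreach_zero_(grads);  // zero every tensor in the list in place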
found_inf=None) -> () -@Namespace("at") public static native void _fused_adam_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional grad_scale, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional found_inf); -@Namespace("at") public static native void _fused_adam_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize); - -// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _fused_adam_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional grad_scale, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional found_inf); -@Namespace("at") public static native void _fused_adam_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize); -// aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _fused_adam_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize, @Const @ByRef TensorOptional grad_scale, @Const @ByRef TensorOptional found_inf, @ByVal TensorArrayRef out); - -// aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) -@Namespace("at") public static native @ByVal TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple _fused_adam(@ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional grad_scale, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional found_inf); -@Namespace("at") public static native @ByVal TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple _fused_adam(@ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize); - - - - -// Parsed from ATen/ops/_fused_adamw.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_fused_adamw_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () -@Namespace("at") public static native void _fused_adamw_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional grad_scale, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional found_inf); -@Namespace("at") public static native void _fused_adamw_(@ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize); - -// aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _fused_adamw_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional grad_scale, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional found_inf); -@Namespace("at") public static native void _fused_adamw_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize); -// aten::_fused_adamw.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _fused_adamw_outf(@ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize, @Const @ByRef TensorOptional grad_scale, @Const @ByRef TensorOptional found_inf, @ByVal TensorArrayRef out); - -// aten::_fused_adamw(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) -@Namespace("at") public static native @ByVal TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple _fused_adamw(@ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional grad_scale, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional found_inf); -@Namespace("at") public static native @ByVal TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple _fused_adamw(@ByVal TensorArrayRef self, @ByVal TensorArrayRef grads, @ByVal TensorArrayRef exp_avgs, @ByVal TensorArrayRef exp_avg_sqs, @ByVal TensorArrayRef max_exp_avg_sqs, @ByVal TensorArrayRef state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, @Cast("bool") boolean amsgrad, @Cast("bool") boolean maximize); - - - - -// Parsed from ATen/ops/_fused_dropout.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _fused_dropout(@Const @ByRef Tensor self, double p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal TensorTensorTuple _fused_dropout(@Const @ByRef Tensor self, double p); - -// aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _fused_dropout_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, double p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _fused_dropout_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, double p); -// aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _fused_dropout_outf(@Const @ByRef Tensor self, double p, @ByVal GeneratorOptional generator, @ByRef Tensor out0, @ByRef Tensor out1); - - - - -// Parsed from ATen/ops/_fused_moving_avg_obs_fq_helper.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) 
zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) -@Namespace("at") public static native @ByVal TensorTensorTuple _fused_moving_avg_obs_fq_helper(@Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @ByRef Tensor running_min, @ByRef Tensor running_max, @ByRef Tensor scale, @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis, @Cast("bool") boolean per_row_fake_quant/*=false*/, @Cast("bool") boolean symmetric_quant/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple _fused_moving_avg_obs_fq_helper(@Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @ByRef Tensor running_min, @ByRef Tensor running_max, @ByRef Tensor scale, @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis); - -// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _fused_moving_avg_obs_fq_helper_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @ByRef Tensor running_min, @ByRef Tensor running_max, @ByRef Tensor scale, @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis, @Cast("bool") boolean per_row_fake_quant/*=false*/, @Cast("bool") boolean symmetric_quant/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _fused_moving_avg_obs_fq_helper_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @ByRef Tensor running_min, @ByRef Tensor running_max, @ByRef Tensor scale, @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis); -// aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) 
out1) -> (Tensor(e!), Tensor(f!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _fused_moving_avg_obs_fq_helper_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @ByRef Tensor running_min, @ByRef Tensor running_max, @ByRef Tensor scale, @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis, @Cast("bool") boolean per_row_fake_quant, @Cast("bool") boolean symmetric_quant, @ByRef Tensor out0, @ByRef Tensor out1); - -// aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTensorTuple _fused_moving_avg_obs_fq_helper_functional(@Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @Const @ByRef Tensor running_min, @Const @ByRef Tensor running_max, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis, @Cast("bool") boolean per_row_fake_quant/*=false*/, @Cast("bool") boolean symmetric_quant/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTensorTuple _fused_moving_avg_obs_fq_helper_functional(@Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @Const @ByRef Tensor running_min, @Const @ByRef Tensor running_max, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis); - - - - -// Parsed from ATen/ops/_fused_sdp_choice.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_fused_sdp_choice(Tensor query, Tensor key, Tensor value, Tensor? 
attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> int -@Namespace("at") public static native @Cast("int64_t") long _fused_sdp_choice(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional attn_mask, double dropout_p/*=0.0*/, @Cast("bool") boolean is_causal/*=false*/); -@Namespace("at") public static native @Cast("int64_t") long _fused_sdp_choice(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value); - - - - -// Parsed from ATen/ops/_fw_primal.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - - - - - -// Parsed from ATen/ops/_fw_primal_copy.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_fw_primal_copy(Tensor self, int level) -> Tensor -@Namespace("at") public static native @ByVal Tensor _fw_primal_copy(@Const @ByRef Tensor self, @Cast("int64_t") long level); - -// aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _fw_primal_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long level); -// aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _fw_primal_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long level, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_gather_sparse_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor -@Namespace("at") public static native @ByVal Tensor _gather_sparse_backward(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor grad); - - - - -// Parsed from ATen/ops/_grid_sampler_2d_cpu_fallback.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor -@Namespace("at") public static native @ByVal Tensor _grid_sampler_2d_cpu_fallback(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); - -// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) 
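// Hedged sketches for two of the fused bindings above. First, one fused Adam
// step through _fused_adam_ (declared further up; _fused_adamw_ takes the
// same arguments): list construction via TensorArrayRef(TensorVector) and the
// randn/zeros/ones factories are assumptions about this preset, and the
// hyperparameters are illustrative only.
TensorArrayRef params   = new TensorArrayRef(new TensorVector(randn(4, 4)));
TensorArrayRef grads    = new TensorArrayRef(new TensorVector(randn(4, 4)));
TensorArrayRef expAvg   = new TensorArrayRef(new TensorVector(zeros(4, 4)));
TensorArrayRef expAvgSq = new TensorArrayRef(new TensorVector(zeros(4, 4)));
TensorArrayRef maxSq    = new TensorArrayRef(new TensorVector(zeros(4, 4)));
TensorArrayRef steps    = new TensorArrayRef(new TensorVector(ones(1)));
_fused_adam_(params, grads, expAvg, expAvgSq, maxSq, steps,
        /*lr=*/1e-3, /*beta1=*/0.9, /*beta2=*/0.999,
        /*weight_decay=*/0.0, /*eps=*/1e-8,
        /*amsgrad=*/false, /*maximize=*/false);
// Second, _fused_sdp_choice reports which scaled-dot-product-attention
// backend the dispatcher would pick for the given operands; the returned
// index maps to an internal backend enum, so its concrete meaning is left
// unspecified here.
Tensor q = randn(1, 8, 16, 64);   // (batch, heads, seq_len, head_dim)
long backend = _fused_sdp_choice(q, randn(1, 8, 16, 64), randn(1, 8, 16, 64));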
-@Namespace("at") public static native @ByRef Tensor _grid_sampler_2d_cpu_fallback_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); -// aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _grid_sampler_2d_cpu_fallback_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _grid_sampler_2d_cpu_fallback_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); - - - - -// Parsed from ATen/ops/_has_compatible_shallow_copy_type.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool -@Namespace("at") public static native @Cast("bool") boolean _has_compatible_shallow_copy_type(@Const @ByRef Tensor self, @Const @ByRef Tensor from); - - - - -// Parsed from ATen/ops/_has_same_storage_numel.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool -@Namespace("at") public static native @Cast("bool") boolean _has_same_storage_numel(@Const @ByRef Tensor self, @Const @ByRef Tensor other); - - - - -// Parsed from ATen/ops/_histogramdd_bin_edges.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + // namespace jit +@Namespace("torch") public static native @Cast("bool") boolean isCustomClass(@Const @ByRef IValue v); + // namespace torch +// For custom class __init__ registration, we need to pass in a function +// that looks like this: [](IValue x, args...) +// However, make_boxed_from_unboxed_functor.h automatically sets the input types +// of the function by introspecting the types of the functor (which is IValue in +// this case). However, we need the type it binds to be Foo. 
-// #include +// Instead, we pass in a lambda [](ivalue_holder x, args...) from +// which getTypePtr can recover the original class pointer. -// aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _histogramdd_bin_edges(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _histogramdd_bin_edges(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _histogramdd_bin_edges(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _histogramdd_bin_edges(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... bins); -// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _histogramdd_bin_edges_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native void _histogramdd_bin_edges_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins); -@Namespace("at") public static native void _histogramdd_bin_edges_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native void _histogramdd_bin_edges_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... bins); -// aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? 
weight=None, bool density=False, Tensor(a!)[] out) -> () -@Namespace("at") public static native void _histogramdd_bin_edges_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins, @ByVal DoubleArrayRefOptional range, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByVal TensorArrayRef out); -@Namespace("at") public static native void _histogramdd_bin_edges_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bins, @ByVal DoubleArrayRefOptional range, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByVal TensorArrayRef out); -// Parsed from ATen/ops/_histogramdd_from_bin_cts.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _histogramdd_from_bin_cts(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal Tensor _histogramdd_from_bin_cts(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins); -@Namespace("at") public static native @ByVal Tensor _histogramdd_from_bin_cts(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal Tensor _histogramdd_from_bin_cts(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... bins); - -// aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _histogramdd_from_bin_cts_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByRef Tensor _histogramdd_from_bin_cts_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins); -@Namespace("at") public static native @ByRef Tensor _histogramdd_from_bin_cts_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByRef Tensor _histogramdd_from_bin_cts_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
bins); -// aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _histogramdd_from_bin_cts_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins, @ByVal DoubleArrayRefOptional range, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _histogramdd_from_bin_cts_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bins, @ByVal DoubleArrayRefOptional range, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_histogramdd_from_bin_tensors.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _histogramdd_from_bin_tensors(@Const @ByRef Tensor self, @ByVal TensorArrayRef bins, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal Tensor _histogramdd_from_bin_tensors(@Const @ByRef Tensor self, @ByVal TensorArrayRef bins); - -// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _histogramdd_from_bin_tensors_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal TensorArrayRef bins, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByRef Tensor _histogramdd_from_bin_tensors_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal TensorArrayRef bins); -// aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _histogramdd_from_bin_tensors_outf(@Const @ByRef Tensor self, @ByVal TensorArrayRef bins, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_index_put_impl.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!) - -// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!) -// aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!) 
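// A hedged sketch for the _histogramdd_* bindings above: bin edges for a 2-D
// point cloud with 5 bins per dimension, via the `long... bins` overload
// declared above; randn is assumed from org.bytedeco.pytorch.global.torch.
Tensor pts = randn(100, 2);                              // 100 points in R^2
TensorVector edges = _histogramdd_bin_edges(pts, 5, 5);  // one edges tensor per dimension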
- -// aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor - - - - -// Parsed from ATen/ops/_indices.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - - - - - -// Parsed from ATen/ops/_indices_copy.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_indices_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _indices_copy(@Const @ByRef Tensor self); - -// aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/_is_all_true.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_is_all_true(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _is_all_true(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_is_any_true.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_is_any_true(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _is_any_true(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_is_zerotensor.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_is_zerotensor(Tensor self) -> bool -@Namespace("at") public static native @Cast("bool") boolean __dispatch__is_zerotensor(@Const @ByRef Tensor self); - - - - -// Parsed from ATen/ops/_linalg_check_errors.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> () -@Namespace("at") public static native void _linalg_check_errors(@Const @ByRef Tensor info, @ByVal @Cast("c10::string_view*") Pointer api_name, @Cast("bool") boolean is_matrix); - - - - -// Parsed from ATen/ops/_linalg_det.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots) 
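// A hedged sketch for _linalg_det, whose schema appears just above and whose
// declaration follows: it returns (result, LU, pivots) packed in one tuple
// object. The get0()/get1()/get2() accessors are what JavaCPP typically
// generates for std::tuple mappings such as TensorTensorTensorTuple — an
// assumption about this preset version.
TensorTensorTensorTuple det = _linalg_det(randn(3, 3));
Tensor result = det.get0();  // the determinant
Tensor lu     = det.get1();  // the LU factorization used to compute it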
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple _linalg_det(@Const @ByRef Tensor A); - -// aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_det_out(@ByRef Tensor result, @ByRef Tensor LU, @ByRef Tensor pivots, @Const @ByRef Tensor A); -// aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_det_outf(@Const @ByRef Tensor A, @ByRef Tensor result, @ByRef Tensor LU, @ByRef Tensor pivots); - - - - -// Parsed from ATen/ops/_linalg_eigh.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors) -@Namespace("at") public static native @ByVal TensorTensorTuple _linalg_eigh(@Const @ByRef Tensor A, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO, @Cast("bool") boolean compute_v/*=true*/); -@Namespace("at") public static native @ByVal TensorTensorTuple _linalg_eigh(@Const @ByRef Tensor A); - -// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_eigh_out(@ByRef Tensor eigenvalues, @ByRef Tensor eigenvectors, @Const @ByRef Tensor A, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO, @Cast("bool") boolean compute_v/*=true*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_eigh_out(@ByRef Tensor eigenvalues, @ByRef Tensor eigenvectors, @Const @ByRef Tensor A); -// aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_eigh_outf(@Const @ByRef Tensor A, @ByVal @Cast("c10::string_view*") Pointer UPLO, @Cast("bool") boolean compute_v, @ByRef Tensor eigenvalues, @ByRef Tensor eigenvectors); - - - - -// Parsed from ATen/ops/_linalg_slogdet.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include -// aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple _linalg_slogdet(@Const @ByRef Tensor A); -// aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) 
pivots) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_slogdet_out(@ByRef Tensor sign, @ByRef Tensor logabsdet, @ByRef Tensor LU, @ByRef Tensor pivots, @Const @ByRef Tensor A); -// aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_slogdet_outf(@Const @ByRef Tensor A, @ByRef Tensor sign, @ByRef Tensor logabsdet, @ByRef Tensor LU, @ByRef Tensor pivots); -// Parsed from ATen/ops/_linalg_solve_ex.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple _linalg_solve_ex(@Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean check_errors/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple _linalg_solve_ex(@Const @ByRef Tensor A, @Const @ByRef Tensor B); - -// aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_solve_ex_out(@ByRef Tensor result, @ByRef Tensor LU, @ByRef Tensor pivots, @ByRef Tensor info, @Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean check_errors/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_solve_ex_out(@ByRef Tensor result, @ByRef Tensor LU, @ByRef Tensor pivots, @ByRef Tensor info, @Const @ByRef Tensor A, @Const @ByRef Tensor B); -// aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_solve_ex_outf(@Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left, @Cast("bool") boolean check_errors, @ByRef Tensor result, @ByRef Tensor LU, @ByRef Tensor pivots, @ByRef Tensor info); - - - - -// Parsed from ATen/ops/_linalg_svd.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? 
driver=None) -> (Tensor U, Tensor S, Tensor Vh) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _linalg_svd(@Const @ByRef Tensor A, @Cast("bool") boolean full_matrices/*=false*/, @Cast("bool") boolean compute_uv/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _linalg_svd(@Const @ByRef Tensor A); - -// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh, @Const @ByRef Tensor A, @Cast("bool") boolean full_matrices/*=false*/, @Cast("bool") boolean compute_uv/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh, @Const @ByRef Tensor A); -// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _linalg_svd_outf(@Const @ByRef Tensor A, @Cast("bool") boolean full_matrices, @Cast("bool") boolean compute_uv, @ByVal @Cast("c10::optional*") Pointer driver, @ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh); - - - - -// Parsed from ATen/ops/_local_scalar_dense.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include -// aten::_local_scalar_dense(Tensor self) -> Scalar -@Namespace("at") public static native @ByVal Scalar _local_scalar_dense(@Const @ByRef Tensor self); -// Parsed from ATen/ops/_log_softmax.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor -@Namespace("at") public static native @ByVal Tensor _log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float); -// aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _log_softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float); -// aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) 
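// A hedged sketch for the _log_softmax binding above, the primitive behind
// torch.log_softmax; half_to_float=false keeps the input dtype. randn is
// assumed from org.bytedeco.pytorch.global.torch.
Tensor x = randn(2, 5);
Tensor logp = _log_softmax(x, /*dim=*/1, /*half_to_float=*/false);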
-@Namespace("at") public static native @ByRef Tensor _log_softmax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float, @ByRef Tensor out); -// Parsed from ATen/ops/_log_softmax_backward_data.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("c10::ivalue") public static native void checkCustomClassType(@Const ClassType expected_type, @Const Type actual_type); +// Targeting ../ConstantString.java -// aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor _log_softmax_backward_data(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, ScalarType input_dtype); +// Targeting ../TupleElements.java -// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _log_softmax_backward_data_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, ScalarType input_dtype); -// aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _log_softmax_backward_data_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, ScalarType input_dtype, @ByRef Tensor out); +// Targeting ../Tuple.java -// Parsed from ATen/ops/_logcumsumexp.h +// Targeting ../Future.java -// #pragma once -// @generated by torchgen/gen.py from Function.h +// Targeting ../Await.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Input is a list of Futures with the same target type. +// Output is a Future to the List of completed Futures. +@Namespace("c10") public static native @ByVal FuturePtr collectAll( + @ByVal FuturePtrList srcs); +// Input is a List of Futures with the same target type. +// Output is a Future that will be updated with a seen value. +@Namespace("c10") public static native @ByVal FuturePtr collectAny( + @ByVal FuturePtrList srcs); -// #include +// User-defined object. +// Targeting ../PyObjectHolder.java -// aten::_logcumsumexp(Tensor self, int dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor _logcumsumexp(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// Targeting ../EnumHolder.java -// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _logcumsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _logcumsumexp_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor out); +// #undef TORCH_FORALL_TAGS + // namespace detail -// Parsed from ATen/ops/_lstm_mps.h -// #pragma once -// @generated by torchgen/gen.py from Function.h +// note: when adding a DEFINE_TO case here you should also add a +// toX method to IValue. 
These named methods are much more discoverable +// than the to templated function. -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #define DEFINE_TO(T, method_name) +// template <> +// inline T IValue::to()&& { +// return static_cast(std::move(*this).method_name()); +// } +// template <> +// inline c10::detail::ivalue_to_const_ref_overload_return::type IValue::to() const& { +// typedef c10::detail::ivalue_to_const_ref_overload_return::type return_type; +// return static_cast(this->method_name()); +// } + -// #include + + -// aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTensorTuple _lstm_mps(@Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); + -// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _lstm_mps_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); -// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) 
out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _lstm_mps_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5); + + + + -// Parsed from ATen/ops/_lu_with_info.h + -// #pragma once + -// @generated by torchgen/gen.py from Function.h + -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + + + -// #include + + -// aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _lu_with_info(@Const @ByRef Tensor self, @Cast("bool") boolean pivot/*=true*/, @Cast("bool") boolean check_errors/*=true*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _lu_with_info(@Const @ByRef Tensor self); + + + + -// Parsed from ATen/ops/_make_dual.h + -// #pragma once + -// @generated by torchgen/gen.py from Function.h + -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + + + -// #include + + -// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor _make_dual(@Const @ByRef Tensor primal, @Const @ByRef Tensor tangent, @Cast("int64_t") long level); + + + + -// Parsed from ATen/ops/_make_dual_copy.h + -// #pragma once + -// @generated by torchgen/gen.py from Function.h + -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + + + -// #include + + -// aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor -@Namespace("at") public static native @ByVal Tensor _make_dual_copy(@Const @ByRef Tensor primal, @Const @ByRef Tensor tangent, @Cast("int64_t") long level); + -// aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _make_dual_copy_out(@ByRef Tensor out, @Const @ByRef Tensor primal, @Const @ByRef Tensor tangent, @Cast("int64_t") long level); -// aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _make_dual_copy_outf(@Const @ByRef Tensor primal, @Const @ByRef Tensor tangent, @Cast("int64_t") long level, @ByRef Tensor out); + + + +// generic_to converts an IValue from a generic list or generic dict +// to a concrete list/dict type likelike List, Dict<...> or optional. +// Note that in the case of lists, this only works for IValue-based lists, +// i.e. not for int64_t, double, ... +// generic_to is an implementation detail of IValue::to and not +// supposed to be called directly. +// The _fake_type parameter allows us to overload +// based on the return type. 
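// A hedged sketch of the Java-side counterpart of the notes above: these
// bindings expose no templated IValue::to<T>(), so one goes through the named
// toX() accessors that the DEFINE_TO cases back (generic_to stays internal).
// Constructor and accessor names are assumed from the usual JavaCPP mapping
// of the C++ methods.
IValue n = new IValue(42L);      // wraps an int64_t
long asLong = n.toInt();         // C++ IValue::toInt()
IValue d = new IValue(0.5);      // wraps a double
double asDouble = d.toDouble();  // C++ IValue::toDouble()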
-// Parsed from ATen/ops/_make_per_channel_quantized_tensor.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace detail + // namespace detail -// #include -// aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor -@Namespace("at") public static native @ByVal Tensor _make_per_channel_quantized_tensor(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis); -// aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _make_per_channel_quantized_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis); -// aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _make_per_channel_quantized_tensor_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @ByRef Tensor out); -// Parsed from ATen/ops/_make_per_tensor_quantized_tensor.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor -@Namespace("at") public static native @ByVal Tensor _make_per_tensor_quantized_tensor(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point); -// aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _make_per_tensor_quantized_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point); -// aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _make_per_tensor_quantized_tensor_outf(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, @ByRef Tensor out); -// Parsed from ATen/ops/_masked_scale.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor -@Namespace("at") public static native @ByVal Tensor _masked_scale(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, double scale); -// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _masked_scale_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask, double scale); -// aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _masked_scale_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, double scale, @ByRef Tensor out); -// Parsed from ATen/ops/_masked_softmax.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _masked_softmax(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional mask_type); -@Namespace("at") public static native @ByVal Tensor _masked_softmax(@Const @ByRef Tensor self, @Const @ByRef Tensor mask); -// aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _masked_softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional mask_type); -@Namespace("at") public static native @ByRef Tensor _masked_softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask); -// aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _masked_softmax_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @ByVal LongOptional dim, @ByVal LongOptional mask_type, @ByRef Tensor out); -// Parsed from ATen/ops/_masked_softmax_backward.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _masked_softmax_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor mask, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); -@Namespace("at") public static native @ByVal Tensor _masked_softmax_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor mask); -// aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _masked_softmax_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor mask, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); -@Namespace("at") public static native @ByRef Tensor _masked_softmax_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor mask); -// aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _masked_softmax_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor mask, @ByVal LongOptional dim, @ByRef Tensor out); -// Parsed from ATen/ops/_mkldnn_reshape.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor -@Namespace("at") public static native @ByVal Tensor _mkldnn_reshape(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape); -@Namespace("at") public static native @ByVal Tensor _mkldnn_reshape(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape); -// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _mkldnn_reshape_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape); -@Namespace("at") public static native @ByRef Tensor _mkldnn_reshape_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape); -// aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _mkldnn_reshape_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _mkldnn_reshape_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, @ByRef Tensor out); -// Parsed from ATen/ops/_mkldnn_transpose.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor -@Namespace("at") public static native @ByVal Tensor _mkldnn_transpose(@Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1); -// aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _mkldnn_transpose_(@ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1); -// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _mkldnn_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1); -// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _mkldnn_transpose_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1, @ByRef Tensor out); -// Parsed from ATen/ops/_mps_convolution.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_mps_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor _mps_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor _mps_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); -// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _mps_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor _mps_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); -// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _mps_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _mps_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); + // namespace detail + // namespace ivalue -// Parsed from ATen/ops/_mps_convolution_transpose.h -// #pragma once -// @generated by torchgen/gen.py from Function.h + // namespace c10 -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from ATen/core/ivalue.h +// #pragma once -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../CustomClassHolder.java -// aten::_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor _mps_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor _mps_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); + // namespace jit + // namespace torch -// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _mps_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor _mps_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); -// aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _mps_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _mps_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("c10") public static native @Cast("bool") boolean _fastEqualsForContainer(@Const @ByRef IValue lhs, @Const @ByRef IValue rhs); +@Namespace("c10") public static native Function checkObjectSortSchema( + @Const @SharedPtr("c10::ClassType") @ByRef ClassType t, + @Cast("std::stringstream*") @ByRef Pointer why_not); +// A comparator that checks ordering of two IValues of same type. -// Parsed from ATen/ops/_native_batch_norm_legit.h -// #pragma once +// We need a ComplexHolder because currently the payloads in the Union +// only take 64 bits. Since ComplexDouble takes up 128 bits, and is too big +// to fit in the IValue directly, we indirect complex numbers through an intrusive +// pointer to ComplexHolder (which contains a c10::complex). -// @generated by torchgen/gen.py from Function.h +// Similar to ComplexHolder, for StreamData3 -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace ivalue +// This is an owning wrapper for a c10::optional> +// that can be implicitly converted to a (non-owning) optional>. +// Its purpose is to be used in generated code to keep the vector alive +// either until the end of a statement (as a temporary), or as a saved arg +// in autograd. +// Capsule is an internal implementation detail of custom C++ classes. 
We +// define it as an owning wrapper for +// c10::intrusive_ptr This wrapper is here to serve as +// an abstraction of the type erased custom class object pointer. It also allow +// pybind11 to treat this as a standalone class to register as a separate type +// caster, instead of a custom pointer holder which the pointer holder type +// caster try to "unwrap" it automatically. -// #include +// IValue is the generic tagged union used by the interpreter to hold +// all value types. +// It is a 16-byte object with an 8-byte payload and an 8-byte tag. +// The tag is currently 4 bytes to determine the type, and 1 byte +// to mark whether that type is a subtype of c10::intrusive_ptr_target and needs +// retain/release calls. -// aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _native_batch_norm_legit(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByRef Tensor running_mean, @ByRef Tensor running_var, @Cast("bool") boolean training, double momentum, double eps); +/// +/// +/// +/// +/// +// #define TORCH_FORALL_TAGS(_) +// _(None) +// _(Tensor) +// _(Storage) +// _(Double) +// _(ComplexDouble) +// _(Int) +// _(SymInt) +// _(SymFloat) +// _(Bool) +// _(Tuple) +// _(String) +// _(Blob) +// _(GenericList) +// _(GenericDict) +// _(Future) +// _(Await) +// _(Device) +// _(Stream) +// _(Object) +// _(PyObject) +// _(Uninitialized) +// _(Capsule) +// _(RRef) +// _(Quantizer) +// _(Generator) +// _(Enum) +// Targeting ../IValue.java -// aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _native_batch_norm_legit_out(@ByRef Tensor out, @ByRef Tensor save_mean, @ByRef Tensor save_invstd, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByRef Tensor running_mean, @ByRef Tensor running_var, @Cast("bool") boolean training, double momentum, double eps); -// aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _native_batch_norm_legit_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByRef Tensor running_mean, @ByRef Tensor running_var, @Cast("bool") boolean training, double momentum, double eps, @ByRef Tensor out, @ByRef Tensor save_mean, @ByRef Tensor save_invstd); -// aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? 
bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _native_batch_norm_legit(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Cast("bool") boolean training, double momentum, double eps); +// Targeting ../WeakIValue.java -// aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _native_batch_norm_legit_out(@ByRef Tensor out, @ByRef Tensor save_mean, @ByRef Tensor save_invstd, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Cast("bool") boolean training, double momentum, double eps); -// aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _native_batch_norm_legit_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Cast("bool") boolean training, double momentum, double eps, @ByRef Tensor out, @ByRef Tensor save_mean, @ByRef Tensor save_invstd); -// aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTuple _native_batch_norm_legit_functional(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor running_mean, @Const @ByRef Tensor running_var, @Cast("bool") boolean training, double momentum, double eps); +// Targeting ../StrongTypePtr.java +// Targeting ../WeakTypePtr.java -// Parsed from ATen/ops/_native_decoder_only_multi_head_attention.h +// Targeting ../WeakOrStrongCompilationUnit.java -// #pragma once -// @generated by torchgen/gen.py from Function.h +// Targeting ../WeakOrStrongTypePtr.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace c10 +// #include // IWYU pragma: keep -// aten::_native_decoder_only_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? 
incr_value=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple _native_decoder_only_multi_head_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional mask, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional incr_key, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional incr_value, @Cast("bool") boolean need_weights/*=true*/, @Cast("bool") boolean average_attn_weights/*=true*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple _native_decoder_only_multi_head_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias); -// aten::_native_decoder_only_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _native_decoder_only_multi_head_attention_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional mask, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional incr_key, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional incr_value, @Cast("bool") boolean need_weights/*=true*/, @Cast("bool") boolean average_attn_weights/*=true*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _native_decoder_only_multi_head_attention_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias); -// aten::_native_decoder_only_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _native_decoder_only_multi_head_attention_outf(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Const @ByRef TensorOptional mask, @Const @ByRef TensorOptional incr_key, @Const @ByRef TensorOptional incr_value, @Cast("bool") boolean need_weights, @Cast("bool") boolean average_attn_weights, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3); +// Parsed from ATen/core/List_inl.h +// #pragma once +// #include +// #include -// Parsed from ATen/ops/_native_multi_head_attention.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _native_multi_head_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional mask, @Cast("bool") boolean need_weights/*=true*/, @Cast("bool") boolean average_attn_weights/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional mask_type); -@Namespace("at") public static native @ByVal TensorTensorTuple _native_multi_head_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias); -// aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _native_multi_head_attention_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional mask, @Cast("bool") boolean need_weights/*=true*/, @Cast("bool") boolean average_attn_weights/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional mask_type); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _native_multi_head_attention_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias); -// aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _native_multi_head_attention_outf(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Const @ByRef TensorOptional mask, @Cast("bool") boolean need_weights, @Cast("bool") boolean average_attn_weights, @ByVal LongOptional mask_type, @ByRef Tensor out0, @ByRef Tensor out1); -// Parsed from ATen/ops/_neg_view.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_neg_view(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor _neg_view(@Const @ByRef Tensor self); +@Namespace("c10::impl") public static native void swap(@ByRef(true) DoubleComplexElementReference lhs, @ByRef(true) DoubleComplexElementReference rhs); +@Namespace("c10::impl") public static native void swap(@ByRef(true) BooleanElementReference lhs, @ByRef(true) BooleanElementReference rhs); -// Parsed from ATen/ops/_neg_view_copy.h +@Namespace("c10::impl") public static native void swap(@ByRef(true) LongElementReference lhs, @ByRef(true) LongElementReference rhs); -// #pragma once +@Namespace("c10::impl") public static native void swap(@ByRef(true) DoubleElementReference lhs, @ByRef(true) DoubleElementReference rhs); -// @generated by torchgen/gen.py from Function.h +@Namespace("c10::impl") public static native void swap(@ByRef(true) TensorOptionalElementReference lhs, @ByRef(true) TensorOptionalElementReference rhs); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("c10::impl") public static native void swap(@ByRef(true) 
TensorElementReference lhs, @ByRef(true) TensorElementReference rhs); +@Namespace("c10::impl") public static native void swap(@ByRef(true) FuturePtrElementReference lhs, @ByRef(true) FuturePtrElementReference rhs); +@Namespace("c10::impl") public static native void swap(@ByRef(true) GenericElementReference lhs, @ByRef(true) GenericElementReference rhs); -// #include -// aten::_neg_view_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _neg_view_copy(@Const @ByRef Tensor self); -// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _neg_view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _neg_view_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); + // namespace impl -// Parsed from ATen/ops/_nested_from_padded.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _nested_from_padded(@Const @ByRef Tensor padded, @Const @ByRef Tensor cpu_nested_shape_example, @Cast("bool") boolean fuse_transform_0213/*=false*/); -@Namespace("at") public static native @ByVal Tensor _nested_from_padded(@Const @ByRef Tensor padded, @Const @ByRef Tensor cpu_nested_shape_example); -// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nested_from_padded_out(@ByRef Tensor out, @Const @ByRef Tensor padded, @Const @ByRef Tensor cpu_nested_shape_example, @Cast("bool") boolean fuse_transform_0213/*=false*/); -@Namespace("at") public static native @ByRef Tensor _nested_from_padded_out(@ByRef Tensor out, @Const @ByRef Tensor padded, @Const @ByRef Tensor cpu_nested_shape_example); -// aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nested_from_padded_outf(@Const @ByRef Tensor padded, @Const @ByRef Tensor cpu_nested_shape_example, @Cast("bool") boolean fuse_transform_0213, @ByRef Tensor out); -// Parsed from ATen/ops/_nested_from_padded_and_nested_example.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor -@Namespace("at") public static native @ByVal Tensor _nested_from_padded_and_nested_example(@Const @ByRef Tensor padded, @Const @ByRef Tensor nt_example); -// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nested_from_padded_and_nested_example_out(@ByRef Tensor out, @Const @ByRef Tensor padded, @Const @ByRef Tensor nt_example); -// aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nested_from_padded_and_nested_example_outf(@Const @ByRef Tensor padded, @Const @ByRef Tensor nt_example, @ByRef Tensor out); -// Parsed from ATen/ops/_nested_select_backward.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor -@Namespace("at") public static native @ByVal Tensor _nested_select_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long index); -// aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, SymInt index) -> Tensor -@Namespace("at") public static native @ByVal Tensor _nested_select_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt index); -// Parsed from ATen/ops/_nested_sum_backward.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _nested_sum_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor _nested_sum_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); -@Namespace("at") public static native @ByVal Tensor _nested_sum_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor _nested_sum_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// Parsed from ATen/ops/_nested_tensor_from_mask.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from ATen/core/List.h -// #include +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor -@Namespace("at") public static native @ByVal Tensor _nested_tensor_from_mask(@Const @ByRef Tensor t, @Const @ByRef Tensor mask, @Cast("bool") boolean mask_check/*=true*/); -@Namespace("at") public static native @ByVal Tensor _nested_tensor_from_mask(@Const @ByRef Tensor t, @Const @ByRef Tensor mask); -// aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _nested_tensor_from_mask_out(@ByRef Tensor out, @Const @ByRef Tensor t, @Const @ByRef Tensor mask, @Cast("bool") boolean mask_check/*=true*/); -@Namespace("at") public static native @ByRef Tensor _nested_tensor_from_mask_out(@ByRef Tensor out, @Const @ByRef Tensor t, @Const @ByRef Tensor mask); -// aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nested_tensor_from_mask_outf(@Const @ByRef Tensor t, @Const @ByRef Tensor mask, @Cast("bool") boolean mask_check, @ByRef Tensor out); +// There is no to() overload for c10::optional. +// Targeting ../DoubleComplexElementReference.java +// Targeting ../BooleanElementReference.java -// Parsed from ATen/ops/_nested_tensor_from_mask_left_aligned.h -// #pragma once +// Targeting ../LongElementReference.java -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../DoubleElementReference.java +// Targeting ../TensorOptionalElementReference.java -// #include +// Targeting ../TensorElementReference.java -// aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool -@Namespace("at") public static native @Cast("bool") boolean _nested_tensor_from_mask_left_aligned(@Const @ByRef Tensor t, @Const @ByRef Tensor mask); +// Targeting ../FuturePtrElementReference.java +// Targeting ../GenericElementReference.java -// Parsed from ATen/ops/_nested_tensor_from_tensor_list.h -// #pragma once +// Targeting ../DoubleComplexListIterator.java -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../BooleanListIterator.java +// Targeting ../LongListIterator.java -// #include +// Targeting ../DoubleListIterator.java -// aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _nested_tensor_from_tensor_list(@ByVal TensorArrayRef list, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor _nested_tensor_from_tensor_list(@ByVal TensorArrayRef list); -// aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _nested_tensor_from_tensor_list_out(@ByRef Tensor out, @ByVal TensorArrayRef list, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional pin_memory); -@Namespace("at") public static native @ByRef Tensor _nested_tensor_from_tensor_list_out(@ByRef Tensor out, @ByVal TensorArrayRef list); -// aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nested_tensor_from_tensor_list_outf(@ByVal TensorArrayRef list, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByRef Tensor out); +// Targeting ../TensorOptionalListIterator.java +// Targeting ../TensorListIterator.java -// Parsed from ATen/ops/_nested_tensor_offsets.h +// Targeting ../FuturePtrListIterator.java -// #pragma once -// @generated by torchgen/gen.py from Function.h +// Targeting ../GenericListIterator.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("c10::impl") public static native @Const IValue ptr_to_first_element(@Const @ByRef GenericList list); +// Targeting ../DoubleComplexList.java -// #include +// Targeting ../BooleanList.java +// Targeting ../LongList.java +// Targeting ../DoubleList.java -// Parsed from ATen/ops/_nested_tensor_size.h -// #pragma once +// Targeting ../TensorOptionalList.java -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../TensorList.java +// Targeting ../FuturePtrList.java -// #include +// Targeting ../GenericList.java -// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nested_tensor_size_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nested_tensor_size_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// GenericList is how IValue stores lists. It is, however, not part of the +// public API. Kernels should use Lists with concrete types instead +// (maybe except for some internal prim ops). -// Parsed from ATen/ops/_nested_tensor_softmax_with_shape.h -// #pragma once -// @generated by torchgen/gen.py from Function.h +// #include // IWYU pragma: keep -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from ATen/core/IListRef_inl.h +// #pragma once -// #include +// #include +// #include -// aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor -@Namespace("at") public static native @ByVal Tensor _nested_tensor_softmax_with_shape(@Const @ByRef Tensor self, @Const @ByRef Tensor query); +/* + * Specializations of `IListRefTagImplBase` that implement the default + * implementation for `IListRefTag::Unboxed`. 
+ */ +/* + * Specializations of `IListRefTagImplBase` that implement the default + * implementation for `IListRefTag::Boxed`. + */ +/* + * Specializations of `IListRefTagImplBase` that implement the default + * implementation for `IListRefTag::Materialized`. + */ +/* + * [Note: ITensorListRef] + * Specializations necessary for `IListRef` type. + * + * Since the default implementations are usually done with supporting + * `Tensor` in mind, we only have to inherit from the base implementations. + */ -// Parsed from ATen/ops/_nested_tensor_strides.h +/* + * [Note: IOptTensorListRef] + * Specializations necessary for `IListRef` type. + * + * We can't get an `at::OptionalTensorRef` directly from an instance of + * `List>` (the type that corresponds to the boxed world). + * + * So, the default implementation won't help us. Thus, we have to implement + * this method ourselves. + */ -// #pragma once + // namespace detail + // namespace c10 -// @generated by torchgen/gen.py from Function.h +// [Note: ITensorListRef] +// [Note: IOptTensorListRef] -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace at +// Parsed from ATen/core/IListRef.h -// #include +// #pragma once +// #include +// #include +// #include -// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nested_tensor_strides_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nested_tensor_strides_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// #include +// #include +// #include +// #include +/* + * [Note: IListRef] + * Wrapper around different API containers (e.g. boxed and unboxed). + * + * What is it? + * =========== + * It is a tagged union of both boxed and unboxed API containers. + * Working implementations: + * + * - `IListRef` + * - `IListRef` + * + * Note that `IListRef` is a view type. Meaning that it won't own the + * tensors it holds. It's intended to be used only as argument parameters. + * Specifically, where these 2 worlds overlap. + * + * What is this for? + * ================= + * Historically, PyTorch has maintained 2 different APIs: the unboxed + * (called from C++ API and Python eager mode) and boxed APIs (called + * from the TorchScript JIT, mobile interpreter, and boxed fallbacks). + * + * Calling unboxed kernels from the boxed "world" and vice-versa may + * result in non-negligible overhead. Lists are one of those types: + * + * - Boxed world: `c10::List` + * - Unboxed world: `c10::ArrayRef` + * + * In this context, `c10::IListRef` solves this problem by wrapping those + * 2 container types, so that we don't need to convert from one to + * the other. + * + * (see https://github.com/pytorch/pytorch/issues/66328) + * + * What does it do? + * ================ + * This container wraps around the different tagged containers + * (currently, only boxed and unboxed), without incurring in extra + * overhead for converting from one to another. It does so while + * exposing usual container methods, which dispatch to corresponding + * implementations. + * + * While it works with different container types, it introduces + * overhead for repeatedly calling member functions (since those will + * get dispatched, again). 
Therefore, you should only use it to iterate + * through the list at most once. If you need to do more complex things, + * call `materialize()` first. + * + * Adding support for a new Tag + * ============================ + * Suppose we want to add a new tag: `Chest`. Here are the steps + * we would have to go through: + * + * 1. Add a line for it in the macro `TORCH_ILISTREF_FORALL_TAGS`. + * + * #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \ + * ... + * _(Chest, ##__VA_ARGS__) + * + * 2. Add type aliases, union members, and constructors. + * + * template + * class IListRef { + * ... + * using chest_type = + * typename detail::IListRefTagImpl::list_type; + * ... + * IListRef(...) : tag_(IListRefTag::Chest) { + * ... + * } + * ... + * union Payload { + * ... + * chest_type chest; + * ... + * }; + * ... + * }; + * + * 3. Add a default implementation for it (in 'IListRef_inl.h'). It's + * preferable to make the default implementation work for `T = Tensor` + * (both `Unboxed` and `Boxed` do it). + * + * template + * class IListRefTagImplBase { + * public: + * using elem_type = ListElemT; + * using list_type = ChestContainer; + * + * static const list_type& unwrap(const IListRef& ilist) { ... } + * + * static typename list_type::const_iterator& unwrap( + * IListRefIterator& it) { ... } + * + * static const typename list_type::const_iterator& unwrap( + * const IListRefIterator& it) { ... } + * + * static IListRefConstRef iterator_get( + * const typename list_type::const_iterator& it) { ... } + * } + * + * 4. Add a specialization for each of the already supported types. + * Finally, for consistency, add them to the tracking list. + * (see [Note: IListRefTagImpl Specializations]) + * + * template <> + * class IListRefTagImpl + * : public IListRefTagImplBase {}; + * + * Adding support for a new Type + * ============================= + * Suppose we want to add support for a new type: `Matrix`. + * Here are the steps we would have to go through: + * + * 1. Add a specialization for each of the existing tags. + * For consistency, add them to the tracking list. + * (see [Note: IListRefTagImpl Specializations]) + * + * template <> + * class IListRefTagImpl + * : public IListRefTagImplBase {}; + * + * template <> + * class IListRefTagImpl + * : public IListRefTagImplBase {}; + * + * Common Problems + * =============== + * 1. One of the `IListRef(Iterator)` methods is failing to compile. + * + * That may be happening because the container type you added + * is not compatible with the code written for that method. If + * that's true, then you might have to transform that code into + * a static method call (see `List::operator[]` method). + * + * 2. Can't make `IListRefIterator::operator*` return a const-reference. + * + * First, keep in mind that we assume that boxed containers will + * have to deal with `IValue` (e.g. `c10::List`). In this context, + * what may be happening is that `IValue` doesn't store internally + * your type `T`. Instead, it constructs a new `T` every time + * you try to get `T` from it (see `IListRef`). + */ +/* + * Applies arbitrary macros to each `IListRefTag`. + */ +// #define TORCH_ILISTREF_FORALL_TAGS(_, ...) +// _(Unboxed, ##__VA_ARGS__) +// _(Boxed, ##__VA_ARGS__) +// _(Materialized, ##__VA_ARGS__) +/* + * Defines a "switch-case" for `TAG`. 
Inside, it executes `BODY`, + * while bringing to scope: + * + * - `ImplT`: the implementation class for `TAG` + * - `this_`: the result of unwrapping `this` + */ +// #define TORCH_ILISTREF_UNWRAP_CASE(TAG, BODY) +// case c10::IListRefTag::TAG: { +// using ImplT = c10::detail::IListRefTagImpl; +// auto& this_ = ImplT::unwrap(*this); +// BODY +// } break; -// Parsed from ATen/ops/_nested_view_from_buffer.h +/* + * Dispatches the unwrap call, depending on `TAG`, followed by + * the execution of `BODY`. It aborts if `TAG` is not an `IListRefTag`. + * + * This macro is useful because it allows the handling of different + * types (that correspond to different tags) to be implemented + * only once. We can do it even when the implementations of the + * different tags aren't syntactically the same, by dispatching + * to a function (e.g. `ImplT::(this_)`). + */ +// #define TORCH_ILISTREF_UNWRAP(TAG, BODY) +// switch (TAG) { +// TORCH_ILISTREF_FORALL_TAGS(TORCH_ILISTREF_UNWRAP_CASE, BODY) +// break; +// default: +// TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); +// } -// #pragma once +@Namespace("c10") public enum IListRefTag { + TORCH_ILISTREF_FORALL_TAGS(0),DEFINE_TAG(1), + None(2); -// @generated by torchgen/gen.py from Function.h + public final int value; + private IListRefTag(int v) { this.value = v; } + private IListRefTag(IListRefTag e) { this.value = e.value; } + public IListRefTag intern() { for (IListRefTag e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +/* + * Type alias that specifies whether we return a reference or a copy of `T`. + * + * What is this for? + * ================= + * Since values in the boxed world are represented by an `IValue`, we also + * depend on whether it can be converted to a const-reference (`Tensor`) or + * has to create a new copy of `T` (`OptionalTensorRef`). + */ -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +/* + * Interface that implements key functions for each `IListRefTag` type. + * + * What is this for? + * ================= + * Given an `IListRef(Iterator)`, some methods have to be implemented + * differently for each `TAG`. Therefore, the methods inside this class + * are used as dispatch targets for the different `IListRefTag` values. + * + * You should create a specialization of this class for each possible + * combination of `IListRefTag` type (except `None`) and element types + * (e.g. `Tensor`). + * + * What does it do? + * ================ + * 1. defines static methods to be used as dispatch targets by both + * `IListRef` and `IListRefIterator` (see the implementation of + * `IListRefTagImplBase`). + * + * 2. defines the `elem_type` and `list_type` aliases that will be + * used in the definition of `IListRef`. In general, we should do + * so by inheriting from `IListRefTagImplBase`. + * + * [Note: IListRefTagImpl Specialization] + * ====================================== + * For `IListRef(Iterator)`: + * - + * - + * - + * + * For `IListRef(Iterator)`: + * - + * - + * - + */ +/* + * Base implementation of `IListRefTagImpl` methods. + * + * What is this for? + * ================= + * This should make adding specializations for new types easier. For + * example, one should be able to add a new type just by making its + * `IListRefTagImpl` specialization inherit from `IListRefTagImplBase`. 
+ * + * You should create a partial specialization for this class only if + * you introduce a new `IListRefTag`. The idea being that there is one + * default implementation for each possible value of `IListRefTag`. + * + * What does it do? + * ================ + * 1. defines `elem_type` as an alias to `ListElemT`. + * + * 2. defines `list_type` as an alias to the default container type + * that will hold a collection of `elem_type`. The idea being that + * all types tagged as `TAG` will have `list_type` as its container, + * with different `elem_type`. + * + * 3. defines the default implementation for each of the methods that + * are supposed to be defined on `IListRefTagImpl` specializations. + * + * 4. inheriting from `IListRefTagImplBase` also means + * that the payload of the type `IListRef` will be of type `list_type` + * when it is tagged as `TAG`. + */ +/* + * Materialized container for `IListRef`. + * + * What is this for? + * ================= + * Container that groups `T` references together. This exchanges the + * overhead of every method call from `IListRef` for a dynamic allocation. + * + * You should use this container instead of `IListRef` if: + * + * - You are going to iterate the list more than once + * - You need to repeatedly access arbitrary elements (using `operator[]`) + * + * What does it do? + * ================ + * Removes the reference (&) from the type, and wraps it into a + * `std::reference_wrapper`. If `IListRefConstRef` is not a + * reference type, then it's left unchanged. + */ -// #include + // namespace detail +/* + * Iterator for `IListRef`. + * + * What is it? + * =========== + * Currently, a `std::bidirectional_iterator` that wraps the iterator + * types defined for each of the `IListRefTag`. + * + * One should be able to use it, as if it were the unwrapped + * iterators themselves. + * + * What does it do? + * ================ + * Similarly to `IListRef`, this is a wrapper class. Specifically, it + * wraps each container's `const_iterator` type alias. So, for example, + * given that the container for `IListRefTag::Boxed` is `c10::List`, this + * iterator will wrap a `c10::List::const_iterator`. + * + * [Note: MSVC Iterator Debug] + * =========================== + * MSVC `vector::iterator` implementation (used in the boxed variant) + * makes it so this union's destructor, copy-constructor (assignment), and + * move-constructor (assignment) are implicitly deleted. + * + * Therefore, we need to explicitly define them as needed. What follows is + * a list of places where these are needed and the reason: + * + * - `Payload` destructor: + * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is set to 2. + * + * - `IListRefIterator` destructor: + * same as above. However, we need to call the variant + * destructor explicitly. + * + * - `IListRefIterator` copy-constructor: + * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is different + * from 0. 
+ */

-// aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)
-@Namespace("at") public static native @ByVal Tensor _nested_view_from_buffer(@Const @ByRef Tensor self, @Const @ByRef Tensor nested_size, @Const @ByRef Tensor nested_strides, @ByVal @Cast("c10::ArrayRef*") LongArrayRef offsets);
-@Namespace("at") public static native @ByVal Tensor _nested_view_from_buffer(@Const @ByRef Tensor self, @Const @ByRef Tensor nested_size, @Const @ByRef Tensor nested_strides, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... offsets);

+/*
+ * See [Note: IListRef]
+ */
+ // namespace c10
+// #include

-// Parsed from ATen/ops/_nested_view_from_buffer_copy.h

+// Parsed from ATen/WrapDimUtils.h

// #pragma once

-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
+// #include
// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
+// #include
+// #include
+// #include

+// If dim_post_expr is 0 and wrap_scalar is true, then dim must be in the
+// range [-1, 0]. This is a special case for scalar tensors and manifests in
+// e.g. torch.sum(scalar_tensor, 0). Otherwise, dim should be in the range
+// [-dim_post_expr, dim_post_expr-1].

-// #include

+@Namespace("at") public static native @Cast("int64_t") long maybe_wrap_dim(@Cast("int64_t") long dim, TensorImpl tensor);

+@Namespace("at") public static native @Cast("int64_t") long maybe_wrap_dim(@Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors);

-// aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _nested_view_from_buffer_copy(@Const @ByRef Tensor self, @Const @ByRef Tensor nested_size, @Const @ByRef Tensor nested_strides, @ByVal @Cast("c10::ArrayRef*") LongArrayRef offsets);
-@Namespace("at") public static native @ByVal Tensor _nested_view_from_buffer_copy(@Const @ByRef Tensor self, @Const @ByRef Tensor nested_size, @Const @ByRef Tensor nested_strides, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... offsets);

+@Namespace("at") public static native @Cast("int64_t") long maybe_wrap_dim(
+ @Cast("int64_t") long dim,
+ @Cast("std::vector*") @StdVector LongVector tensor_sizes);

-// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _nested_view_from_buffer_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor nested_size, @Const @ByRef Tensor nested_strides, @ByVal @Cast("c10::ArrayRef*") LongArrayRef offsets);
-@Namespace("at") public static native @ByRef Tensor _nested_view_from_buffer_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor nested_size, @Const @ByRef Tensor nested_strides, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... offsets);
-// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _nested_view_from_buffer_copy_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor nested_size, @Const @ByRef Tensor nested_strides, @ByVal @Cast("c10::ArrayRef*") LongArrayRef offsets, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _nested_view_from_buffer_copy_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor nested_size, @Const @ByRef Tensor nested_strides, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] offsets, @ByRef Tensor out); +// Given an array of dimensions `dims` of length `ndims`, this function "Wraps" +// each dim in-place for a tensor of rank `dim_post_expr`, allowing dims to be +// specified using negative indices. +// +// Additionally, if `wrap_scalar` is true then scalar tensors with rank 0, will +// allow dimensions in the range [-1, 0]. Otherwise, an IndexError is raised for +// dimensions not in the range [-dim_post_expr, dim_post_expr). +@Namespace("at") public static native void maybe_wrap_dims_n( + @Cast("int64_t*") LongPointer dims, + @Cast("int64_t") long ndims, + @Cast("int64_t") long dim_post_expr, + @Cast("bool") boolean wrap_scalars/*=true*/); +@Namespace("at") public static native void maybe_wrap_dims_n( + @Cast("int64_t*") LongPointer dims, + @Cast("int64_t") long ndims, + @Cast("int64_t") long dim_post_expr); +@Namespace("at") public static native void maybe_wrap_dims_n( + @Cast("int64_t*") LongBuffer dims, + @Cast("int64_t") long ndims, + @Cast("int64_t") long dim_post_expr, + @Cast("bool") boolean wrap_scalars/*=true*/); +@Namespace("at") public static native void maybe_wrap_dims_n( + @Cast("int64_t*") LongBuffer dims, + @Cast("int64_t") long ndims, + @Cast("int64_t") long dim_post_expr); +@Namespace("at") public static native void maybe_wrap_dims_n( + @Cast("int64_t*") long[] dims, + @Cast("int64_t") long ndims, + @Cast("int64_t") long dim_post_expr, + @Cast("bool") boolean wrap_scalars/*=true*/); +@Namespace("at") public static native void maybe_wrap_dims_n( + @Cast("int64_t*") long[] dims, + @Cast("int64_t") long ndims, + @Cast("int64_t") long dim_post_expr); +// Given a contiguous container of dimensions `dims`, this function "Wraps" +// each dim in-place for a tensor of rank `dim_post_expr`, allowing dims to be +// specified using negative indices. +// +// Additionally, if `wrap_scalar` is true then scalar tensors with rank 0, will +// allow dimensions in the range [-1, 0]. Otherwise, an IndexError is raised for +// dimensions not in the range [-dim_post_expr, dim_post_expr). +// previously, size [0] tensors were the only possible empty tensors; thus, it +// wasn't possible to cat empty tensors unless all the other tensors were +// 1-dimensional, so we allowed these tensors to be "skipped" (both for wrap +// dimension behavior and dimension size checking). We maintain this behavior +// for backwards compatibility, but only for this specific size (i.e. other +// empty sizes are not skipped). 
+@Namespace("at") public static native @Cast("int64_t") long legacy_cat_wrap_dim( + @Cast("int64_t") long dim, + @Cast("std::vector*") @StdVector LongVector tensor_sizes); -// Parsed from ATen/ops/_new_zeros_with_same_feature_meta.h +@Namespace("at") public static native @Cast("int64_t") long legacy_cat_wrap_dim_symint( + @Cast("int64_t") long dim, + @StdVector SymIntVector tensor_sizes); -// #pragma once +// wrap negative dims in a vector +@Namespace("at") public static native void wrap_all_dims( + @Cast("std::vector*") @ByRef LongVector dims_to_wrap, + @Cast("int64_t") long tensor_total_dims); -// @generated by torchgen/gen.py from Function.h + // namespace at -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from ATen/TensorNames.h +// #pragma once -// #include +// #include +// Targeting ../TensorName.java -// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor _new_zeros_with_same_feature_meta(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Cast("int64_t") long self_num_batch_dims/*=0*/); -@Namespace("at") public static native @ByVal Tensor _new_zeros_with_same_feature_meta(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// Targeting ../TensorNames.java -// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _new_zeros_with_same_feature_meta_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Cast("int64_t") long self_num_batch_dims/*=0*/); -@Namespace("at") public static native @ByRef Tensor _new_zeros_with_same_feature_meta_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _new_zeros_with_same_feature_meta_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Cast("int64_t") long self_num_batch_dims, @ByRef Tensor out); + // namespace namedinference + // namespace at -// Parsed from ATen/ops/_nnpack_spatial_convolution.h +// Parsed from ATen/NamedTensorUtils.h // #pragma once +// #include +// #include +// #include -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include +// #include // #include -// #include -// #include -// #include -// #include -// #include - - +// #include -// #include +@Namespace("at") public static native @Cast("bool") boolean has_names(@ByVal TensorArrayRef tensors); +// Converts dim to an positional index. Errors if `dim` cannot be used to +// refer to any dimension of tensor. +@Namespace("at") public static native @Cast("int64_t") long dimname_to_position(@Const @ByRef Tensor tensor, @ByVal Dimname dim); +@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector dimnames_to_positions( + @Const @ByRef Tensor tensor, + @ByVal DimnameArrayRef dims); -// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? 
bias, SymInt[2] padding, int[2] stride=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor _nnpack_spatial_convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor _nnpack_spatial_convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor _nnpack_spatial_convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -@Namespace("at") public static native @ByVal Tensor _nnpack_spatial_convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// Unifies two DimnameList to produce a third. This is useful for implementing +// the named inference rule for binary broadcasting operations like add. +// +// There are three main constraints: +// 1) Check matching: Names must match positionally from the right. +// 2) Check misaligned: If a name `n` is in `names`, then it must appear at +// the same index from the right in other. +// 3) The output names are obtained by unifying the names individually from the +// right. +@Namespace("at") public static native @StdMove DimnameVector unify_from_right( + @ByVal DimnameArrayRef names, + @ByVal DimnameArrayRef other, + @Cast("const char*") BytePointer action/*="broadcast"*/); +@Namespace("at") public static native @StdMove DimnameVector unify_from_right( + @ByVal DimnameArrayRef names, + @ByVal DimnameArrayRef other); +@Namespace("at") public static native @StdMove DimnameVector unify_from_right( + @ByVal DimnameArrayRef names, + @ByVal DimnameArrayRef other, + String action/*="broadcast"*/); +@Namespace("at") public static native void reportNYIDimnameOverload(@Cast("const char*") BytePointer op_name); +@Namespace("at") public static native void reportNYIDimnameOverload(String op_name); -// aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor _nnpack_spatial_convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor _nnpack_spatial_convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding); -@Namespace("at") public static native @ByVal Tensor _nnpack_spatial_convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride);

+// [NOTE] Writing name inference rules
+//
+// Operators that support named tensors are either composed of operations that
+// support named tensors or implement some name inference rule. An op that
+// implements its own name inference rule generally looks like the following:
+//
+// Tensor op(...) {
+// perform_shape_checks(...);
+// # (1)
+// auto maybe_outnames = compute_outnames(...);
+// auto result = [&]() {
+// NoNamesGuard guard;
+// return op_impl(...);
+// }();
+// # (2)
+// propagate_names_if_nonempty(result, maybe_outnames);
+//
+// Each op has (1) a compute outnames step and (2) a propagate names step.
+//
+// compute_outnames is responsible for checking that input names match and
+// determining what the output names should be. It returns either:
+// - {} (if the input tensors are all unnamed)
+// - non-empty outnames.
+//
+// propagate_names_if_nonempty propagates the outnames, if they exist, to the
+// result tensors.
+//
+// The {} case is an optimization; if the user does not use named tensors they
+// pay no perf cost for it.

-// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride);
-@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding);
-@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride);
-@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding);

+// Propagates `names` to `result` if `names` is not empty.
+// `names` can be empty; see [NOTE] Writing name inference rules.
+// If `names` is not empty, `names.size()` should equal `result.dim()`.
+// When in doubt, use this overload instead of the others.
+@Namespace("at::namedinference") public static native @Const @ByRef Tensor propagate_names_if_nonempty(
+ @Const @ByRef Tensor result,
+ @ByVal DimnameArrayRef maybe_names,
+ @Cast("bool") boolean validate_names/*=false*/);
+@Namespace("at::namedinference") public static native @Const @ByRef Tensor propagate_names_if_nonempty(
+ @Const @ByRef Tensor result,
+ @ByVal DimnameArrayRef maybe_names);
+// Propagates `names` to `result`. Only use this if we are certain that there
+// are names to propagate (that `names` is not empty).
+@Namespace("at::namedinference") public static native @Const @ByRef Tensor propagate_names( + @Const @ByRef Tensor result, + @ByVal DimnameArrayRef names, + @Cast("bool") boolean validate_names/*=false*/); +@Namespace("at::namedinference") public static native @Const @ByRef Tensor propagate_names( + @Const @ByRef Tensor result, + @ByVal DimnameArrayRef names); -// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +// Propagates all names from src to result. +@Namespace("at::namedinference") public static native void propagate_names(@Const @ByRef Tensor result, @Const @ByRef Tensor src); +// Propagates all names except for those at the excluded_idxs. +@Namespace("at::namedinference") public static native void propagate_names_except( + @Const @ByRef Tensor result, + @Const @ByRef Tensor src, + @ByVal LongArrayRef excluded_idxs); +@Namespace("at::namedinference") public static native void propagate_names_except( + @Const @ByRef Tensor result, + @Const @ByRef Tensor src, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... excluded_idxs); -// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding); -@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// Used for reduction ops that have a `keepdim` arg. 
+@Namespace("at::namedinference") public static native void propagate_names_for_reduction( + @Const @ByRef Tensor result, + @Const @ByRef Tensor src, + @ByVal LongArrayRef excluded_idxs, + @Cast("bool") boolean keepdim); +@Namespace("at::namedinference") public static native void propagate_names_for_reduction( + @Const @ByRef Tensor result, + @Const @ByRef Tensor src, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] excluded_idxs, + @Cast("bool") boolean keepdim); +@Namespace("at::namedinference") public static native void propagate_names_for_expand( + @Const @ByRef Tensor result, + @Const @ByRef Tensor self); -// aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _nnpack_spatial_convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_broadcast_outnames( + @Const @ByRef Tensor self, + @Const @ByRef Tensor other); +@Namespace("at::namedinference") public static native @StdMove DimnameVector broadcast_to_outnames( + @Const @ByRef Tensor tensor, + @Const @ByRef Tensor reference_tensor, + @Cast("const char*") BytePointer op_name); +@Namespace("at::namedinference") public static native @StdMove DimnameVector broadcast_to_outnames( + @Const @ByRef Tensor tensor, + @Const @ByRef Tensor reference_tensor, + String op_name); +@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_matmul_outnames( + @Const @ByRef Tensor self, + @Const @ByRef Tensor other); +@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_cdist_outnames( + @Const @ByRef Tensor self, + @Const @ByRef Tensor other); +@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_bmm_outnames( + @Const @ByRef Tensor result, + @Const @ByRef Tensor self, + @Const @ByRef Tensor other); -// Parsed from ATen/ops/_nnz.h +@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_squeeze_outnames(@Const @ByRef Tensor tensor); +@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_squeeze_outnames( + @Const @ByRef Tensor tensor, + long dims); -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// TensorImpl* overloads for Legacy TH/THC code. Use these sparingly. 
+@Namespace("at::namedinference") public static native TensorImpl propagate_names_if_nonempty( + TensorImpl result, + @ByVal DimnameArrayRef maybe_names, + @Cast("bool") boolean validate_names/*=false*/); +@Namespace("at::namedinference") public static native TensorImpl propagate_names_if_nonempty( + TensorImpl result, + @ByVal DimnameArrayRef maybe_names); +@Namespace("at::namedinference") public static native TensorImpl propagate_names( + TensorImpl result, + @ByVal DimnameArrayRef names, + @Cast("bool") boolean validate_names/*=false*/); +@Namespace("at::namedinference") public static native TensorImpl propagate_names( + TensorImpl result, + @ByVal DimnameArrayRef names); -// #include +@Namespace("at::namedinference") public static native void propagate_names(TensorImpl result, TensorImpl src); +@Namespace("at::namedinference") public static native void propagate_names( + @Const @ByRef TensorBase result, + @ByVal DimnameArrayRef names, + @Cast("bool") boolean validate_names/*=false*/); +@Namespace("at::namedinference") public static native void propagate_names( + @Const @ByRef TensorBase result, + @ByVal DimnameArrayRef names); +@Namespace("at::namedinference") public static native void propagate_names_if_nonempty( + @Const @ByRef TensorBase result, + @ByVal DimnameArrayRef names, + @Cast("bool") boolean validate_names/*=false*/); +@Namespace("at::namedinference") public static native void propagate_names_if_nonempty( + @Const @ByRef TensorBase result, + @ByVal DimnameArrayRef names); +@Namespace("at::namedinference") public static native void propagate_names( + @Const @ByRef TensorBase result, + @Const @ByRef TensorBase src); +// result = m1 @ m2 + bias +@Namespace("at::namedinference") public static native @StdMove DimnameVector propagate_names_for_addmm( + @Const @ByRef Tensor m1, + @Const @ByRef Tensor m2, + @Const @ByRef Tensor bias); +@Namespace("at::namedinference") public static native @StdMove DimnameVector propagate_names_for_addmv( + @Const @ByRef Tensor mat, + @Const @ByRef Tensor vec, + @Const @ByRef Tensor bias); -// Parsed from ATen/ops/_pack_padded_sequence.h +@Namespace("at::namedinference") public static native void check_names_for_dot(TensorImpl vec1, TensorImpl vec2); -// #pragma once +@Namespace("at::namedinference") public static native @StdMove DimnameVector compute_baddbmm_outnames( + @Const @ByRef Tensor result, + @Const @ByRef Tensor self, + @Const @ByRef Tensor other, + @Const @ByRef Tensor bias); -// @generated by torchgen/gen.py from Function.h +@Namespace("at::namedinference") public static native @Cast("bool") boolean are_names_equal(TensorImpl self, TensorImpl other); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace namedinference + // namespace at -// #include +// Parsed from torch/csrc/autograd/variable.h +// #pragma once -// aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _pack_padded_sequence(@Const @ByRef Tensor input, @Const @ByRef Tensor lengths, @Cast("bool") boolean batch_first); +// #include -// aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _pack_padded_sequence_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, @Const @ByRef Tensor lengths, @Cast("bool") boolean batch_first); -// aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _pack_padded_sequence_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor lengths, @Cast("bool") boolean batch_first, @ByRef Tensor out0, @ByRef Tensor out1); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +/** {@code Variable} is exactly the same as {@code Tensor} (i.e. we have {@code using Variable = + * at::Tensor}). This means you can perform all the usual mathematical and + * other operations you can perform on {@code Tensor}s also on {@code Variable}s. + * + * The only reason we are keeping the {@code Variable} class is backward + * compatibility with external user's legacy C++ frontend code. Our intention + * is to eliminate the {@code Variable} class in the near future. */ -// Parsed from ATen/ops/_pack_padded_sequence_backward.h + // namespace autograd + // namespace torch -// #pragma once +// The following are all internal APIs and should not be shown in libtorch docs. +// Therefore, we wrap the following code with `#ifndef DOXYGEN_SHOULD_SKIP_THIS +// ... #endif` -// @generated by torchgen/gen.py from Function.h +// #ifndef DOXYGEN_SHOULD_SKIP_THIS -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +/** Check if this type is supported by the autograd engine. + * If you change this, update the doc at the top of the + * torch/autograd/__init__.py file and + * "test_set_requires_grad_only_for_continuous_types" in test/test_autograd.py */ +@Namespace("torch::autograd") public static native @Cast("bool") boolean isDifferentiableType(ScalarType t); +/**~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Variable + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * A {@code Variable} augments a {@code Tensor} with the ability to interact in our + * autograd machinery. Conceptually, {@code Variable}s travel along {@code Edge}s between + * {@code Node}s in the autograd graph. A {@code Variable} can either be a leaf, like a + * weight in a neural network, or an interior variable, when it is the result + * of an operation between variables. Every {@code Variable} also stores another + * {@code Variable} called its {@code grad} (gradient). If the variable is a leaf, its + * gradient will be accumulated into this variable. + * + * Every Tensor is a Variable, but sometimes we colloquially refer to Variables + * that don't require gradients as Tensors (since none of the autograd + * machinery for Variables applies). Historically, Variables and Tensors + * were separate concepts, but now they are exactly the same (i.e. we have + * {@code using Variable = at::Tensor}). 
+ *
+ * Gradient Edges
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Furthermore, {@code Variable}s have the notion of a {@code gradient_edge}, which is the
+ * edge in the autograd graph that connects the variable to a particular input
+ * of the gradient function that will be invoked with the variable during the
+ * backward pass. More precisely, this gradient function can be one of two
+ * things:
+ * 1. A {@code grad_fn}, if the variable is in the interior of the graph. This is the
+ * gradient of the function that produced the variable.
+ * 2. A {@code grad_accumulator}, if the variable is a leaf, which accumulates a
+ * scalar gradient value into its {@code grad} variable.
+ *
+ * Versioning
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Another major feature of {@code Variable}s is *versions*. Versions are
+ * incremented when an in-place mutation of a variable occurs. Versions are
+ * useful when constructing {@code SavedVariable}s, which take a snapshot of a
+ * {@code Variable} at a certain version. You can retrieve a {@code Variable}'s version
+ * through its {@code current_version()} method.
+ *
+ * Views
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * It is possible for a {@code Variable} to be a *view* of another {@code Variable}, in
+ * which case it tracks that {@code Variable}'s data and autograd history. Beyond
+ * construction, the interface of a view is identical to that of a regular
+ * {@code Variable}. You can determine whether a {@code Variable} is in fact a view by
+ * probing its {@code is_view()} method. Note that the *view* semantics are only
+ * meaningful for {@code Variable} relations that are relevant to autograd.
+ * See NOTE [ Autograd View Variables ] for more details.
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */

+// Private-ish functions for manipulating variables; we don't want to put them
+// on Tensor proper

-// #include

+// WARNING: This may return a nullptr. If you require AutogradMeta to return
+// a materialized structure, use materialize_autograd_meta instead.
+@Namespace("torch::autograd::impl") public static native AutogradMeta get_autograd_meta(@Const @ByRef TensorBase arg0);

+// WARNING: This will return a nullptr if the Tensor is not a view.
+@Namespace("torch::autograd::impl") public static native DifferentiableViewMeta get_view_autograd_meta(@Const @ByRef TensorBase arg0);

-// aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _pack_padded_sequence_backward(@Const @ByRef Tensor grad, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Const @ByRef Tensor batch_sizes, @Cast("bool") boolean batch_first);
-@Namespace("at") public static native @ByVal Tensor _pack_padded_sequence_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Const @ByRef Tensor batch_sizes, @Cast("bool") boolean batch_first);

+// Returns the current autograd meta, materializing it if it was previously
+// none. This counts as a *mutating* operation, so do not call it on
+// "read-only" operators; in particular, this is NOT thread safe.
+@Namespace("torch::autograd::impl") public static native AutogradMeta materialize_autograd_meta(@Const @ByRef TensorBase arg0);

+/** Set the gradient accumulator of the {@code Variable}.
This is only applicable to
+ * leaf variables. Interior variables should call {@code set_gradient_edge()}. */

-// aten::_pack_padded_sequence_backward(Tensor grad, SymInt[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _pack_padded_sequence_backward_symint(@Const @ByRef Tensor grad, @ByVal SymIntRef input_size, @Const @ByRef Tensor batch_sizes, @Cast("bool") boolean batch_first);

+/** Attempts to get a pointer to the gradient accumulator of the {@code Variable},
+ * if it still exists. If the gradient accumulator function has been
+ * destroyed, returns a {@code nullptr}. */
+@Namespace("torch::autograd::impl") public static native @SharedPtr Node try_get_grad_accumulator(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0);

+/** Gets the gradient accumulator of the {@code Variable} if it has one, or else
+ * creates one on the fly and returns it. */
+@Namespace("torch::autograd::impl") public static native @SharedPtr Node grad_accumulator(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0);

+/** Returns the "canonical" gradient edge of this {@code Variable}, i.e. either the
+ * gradient function if this is an interior {@code Variable}, or the gradient
+ * accumulator otherwise. If the {@code Variable} is interior, the returned {@code Edge}
+ * will store the input index of the {@code Node} to which this variable is
+ * connected in its {@code input_nr} field. For leaves, the {@code input_nr} is always
+ * zero. Note that {@code set_gradient_edge} and {@code gradient_edge} are not
+ * symmetric. You must use {@code set_gradient_edge} to set the {@code grad_fn} and
+ * {@code set_grad_accumulator} to set the accumulator. */
+@Namespace("torch::autograd::impl") public static native @ByVal Edge gradient_edge(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0);

+/** Set the gradient edge -- i.e. {@code grad_fn} and {@code input_nr} -- of the
+ * {@code Variable}.
+ * NOTE: This will always set the {@code grad_fn}, even if this is a leaf variable,
+ * and never the {@code grad_accumulator}. For the latter, use
+ * {@code set_grad_accumulator}. This allows late construction of an interior
+ * {@code Variable}. */
+///
+@Namespace("torch::autograd::impl") public static native void set_gradient_edge(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0, @ByVal Edge edge);

-// Parsed from ATen/ops/_pad_circular.h

+// Autograd Graph Interaction
+//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-// #pragma once

+/** Update the {@code grad_fn} of an existing Variable. Called after in-place
+ * modifications.
+ *
+ * For View Variables:
+ * Called after in-place modifications. Modifies the grad_fn of the base
+ * Variable. */
+@Namespace("torch::autograd::impl") public static native void rebase_history(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0, @ByVal Edge gradient_edge);

-// @generated by torchgen/gen.py from Function.h

+/** Gets the raw gradient function pointer, whatever it currently is. */
+@Namespace("torch::autograd::impl") public static native Node grad_fn_unsafe(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0);

-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include

+/** Increments the version count of this {@code Variable}.
 */
+@Namespace("torch::autograd::impl") public static native void bump_version(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0);
+@Namespace("torch::autograd::impl") public static native void set_version_counter(
+ @Cast("const torch::autograd::Variable*") @ByRef Tensor arg0,
+ @Const @ByRef VariableVersion version_counter);

+/** Retrieves this {@code Variable}'s version counter. */
+@Namespace("torch::autograd::impl") public static native @Const @ByRef VariableVersion version_counter(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0);

+@Namespace("torch::autograd::impl") public static native void set_name(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0, @StdString BytePointer name);
+@Namespace("torch::autograd::impl") public static native void set_name(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0, @StdString String name);

+@Namespace("torch::autograd::impl") public static native void add_hook(
+ @Const @ByRef TensorBase arg0,
+ @UniquePtr @Cast({"", "std::unique_ptr&&"}) FunctionPreHook hook);
+@Namespace("torch::autograd::impl") public static native @ByRef FunctionPreHookVector hooks(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0);
+@Namespace("torch::autograd::impl") public static native void clear_hooks(@Const @ByRef TensorBase arg0);
+@Namespace("torch::autograd::impl") public static native void create_cpp_hook(
+ @Const @ByRef TensorBase arg0,
+ @Cast("bool") boolean is_retains_grad_hooks/*=false*/);
+@Namespace("torch::autograd::impl") public static native void create_cpp_hook(
+ @Const @ByRef TensorBase arg0);

-// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _pad_circular(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad);
-@Namespace("at") public static native @ByVal Tensor _pad_circular(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad);

+// Targeting ../AutogradMeta.java

-// aten::_pad_circular(Tensor self, SymInt[] pad) -> Tensor
-@Namespace("at") public static native @ByVal Tensor _pad_circular_symint(@Const @ByRef Tensor self, @ByVal SymIntRef pad);

+//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// DifferentiableViewMeta
+//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+/** NOTE [ Autograd View Variables ]
+ *
+ * Many operations return a Variable that shares storage with an input Variable.
+ * The returned Variable is called a **view** Variable on the input **base**
+ * Variable.
+ *
+ * In PyTorch, we have two types of views: differentiable views, and
+ * non-differentiable views. In either type, to support proper version
+ * checking, the base and view Variables must always share the same
+ * version_counter.
+ *
+ *
+ * Differentiable Views
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This class allows tracking of both forward and backward AD differentiable
+ * views. These views can have different bases, as the forward and backward
+ * mode AD views are not the same.
+ *
+ * Most functions are either both forward and backward differentiable views (for
+ * example: view, select, narrow, transpose, etc) or both not forward and not
+ * backward differentiable views (for example: indices, values, eq, lt, etc).
+ * But there are also functions that are forward but not backward
+ * differentiable views (only detach for now) or functions that are backward
+ * but not forward differentiable views (only make_dual and unpack_dual for
+ * now).
+ *
+ * A concrete example of two views with different bases is as follows:
+ *
+ * # Have:
+ * # dual is a dual Tensor that is neither a forward nor a backward view
+ * detached_dual = dual.detach()
+ * view = detached_dual.view_as(dual)
+ * # The forward base of view is dual
+ * # The backward base of view is detached_dual
+ *
+ * - Backward Mode View
+ * Differentiable views are the view variables where you want gradients to flow
+ * back to the base variables. Out-of-place operations on views are quite
+ * straightforward, but in-place ones are very tricky. Even if the base
+ * variable may not require grad when we create the view, we still need to
+ * track the view relation because future in-place ops may require back-propagating
+ * through it. For example, we need to support
+ *
+ * (1) in-place operation on view, e.g.,
+ *
+ * # Have:
+ * # base.requires_grad = False
+ * # var.requires_grad = True
+ * base[1] = var # i.e., base[1].copy_(var)
+ * torch.autograd.grad(base.sum(), var) <- should return an all ones
+ * tensor
+ *
+ * (2) in-place operation on base after view is created, e.g.,
+ *
+ * # Have:
+ * # base.requires_grad = False
+ * # var.requires_grad = True
+ * view = base[1]
+ * base.copy_(var)
+ * torch.autograd.grad(view.sum(), var) <- should return a tensor with
+ * var[1] filled with all ones and
+ * zeros everywhere else
+ *
+ * - Forward Mode View
+ * Forward differentiable views follow the same semantics as backward ones but
+ * show up differently as they are computed along with the forward evaluation.
+ * The hard examples above are thus very similar:
+ *
+ * (1) in-place operation on view, e.g.,
+ *
+ * # Have:
+ * # base is a regular Tensor
+ * # var is a dual Tensor whose tangent is all ones
+ * base[1] = var # i.e., base[1].copy_(var)
+ * # Now, base is a dual Tensor
+ * _, fw_grad = fwAD.unpack_dual(base) <- fw_grad should be a tensor with
+ * fw_grad[1] filled with all ones
+ * and zeros everywhere else
+ *
+ * (2) in-place operation on base after view is created, e.g.,
+ *
+ * # Have:
+ * # base is a regular Tensor
+ * # var is a dual Tensor whose tangent is all ones
+ * view = base[1]
+ * base.copy_(var)
+ * _, fw_grad = fwAD.unpack_dual(view) <- fw_grad should be an all ones
+ * tensor
+ *
+ * See Note [Forward Grad View/inplace] for more details on how we handle these
+ * hard cases.
+ *
+ *
+ * DifferentiableViewMeta is created to support gradient tracking of
+ * such **in-place** operations. In particular,
+ * + if an in-place op is done on base, the grad_fn field of the view may
+ * become stale. So accesses should always go through grad_fn(), which
+ * reconstructs an updated grad_fn if the version_counter has incremented.
+ * All other fields are always valid.
+ * + if an in-place op is done on view, in rebase_history() of view, which is
+ * called after every in-place op in VariableType.cpp, the grad_fn of base
+ * is updated.
+ * + if a single autograd Node returns multiple differentiable views, if any
+ * output is modified by an inplace operation, the autograd engine will
+ * make an equivalent graph (corresponding to the view operations),
+ * where each output is treated as if it were
+ * produced by a distinct view operation. This discards the original (e.g.,
+ * user provided) grad_fn.
If the provided grad_fn does more than the
+ * backward of the view, then the DifferentiableViewMeta must be created
+ * with creation_meta=CreationMeta::MULTI_OUTPUT_NODE to prevent the
+ * engine from ignoring the provided grad_fn.
+ *
+ * Interaction with GradMode:
+ * The particular case that we consider here is:
+ *
+ * # Have:
+ * # base.requires_grad = True or False
+ * with torch.no_grad():
+ * view = base[1]
+ * base.requires_grad_()
+ * view.copy_(var)
+ * torch.autograd.grad(base.sum(), var) <- what should it return?
+ *
+ * Given that this particular code example is ambiguous and can easily be
+ * replaced by either moving both inside the no_grad block or both outside, we
+ * explicitly forbid it. For now, it is flagged with a deprecation warning. This is
+ * achieved by setting creation_meta=CreationMeta::NO_GRAD_MODE for all
+ * differentiable views created in no_grad mode.
+ *
+ * See Note [View + Inplace update for base tensor]
+ * and Note [View + Inplace update for view tensor] for the details of how
+ * autograd handles in-place updates with view ops.
+ *
+ * Non-Differentiable Views
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * In certain cases, although function outputs share storage with inputs, they
+ * will **never** require gradient history tracking. Instead of registering the
+ * view relation via DifferentiableViewMeta in autograd, the views will use
+ * the usual AutogradMeta and just share the version counters with the base
+ * Variables.
+ * Such views include:
+ * 1. Views created from .detach()
+ * 2. Views that are non-differentiable by their nature.
+ * E.g., {@code sparse_tensor.indices()} is an integral view on a (possibly)
+ * floating point tensor.
+ * See top of {@code derivatives.yaml} on how to specify that outputs of a
+ * function are non-differentiable.
+ * These are called non-differentiable views as the gradients do not flow
+ * through the view relation.
+ *
+ * Relevant logic for both differentiable and non-differentiable views is
+ * implemented in make_variable_(non_)differentiable_view below, and
+ * wrap_output of gen_variable_type.py.
+

+ * NOTE [ View + Inplace detection ]
+ *
+ * We want to detect views followed by in-place operations, as they are often
+ * forbidden, to ensure correctness of the computed gradients. But since we want
+ * to only notify the user when both happen, we tag the DifferentiableViewMeta
+ * when the view is created via the {@code make_variable_*_view()} functions. This tag
+ * is then checked by the {@code check_inplace()} function from {@code VariableTypeUtils.h},
+ * which should be called before every in-place operation. To detect cases where
+ * other views are modified and this one is rebased by side effect, we also
+ * check in {@code VariableHooks::grad_fn()}.
+

+ * Flag that gives more information about when this view was created:
+ * - IN_CUSTOM_FUNCTION should be set when the view is created inside a custom
+ * autograd Function and returned.
+ * - NO_GRAD_MODE should be set when a view is created while GradMode is
+ * disabled.
+ * - MULTI_OUTPUT_NODE should be set when a Node created by codegen code
+ * returns multiple differentiable views.
+ * - INFERENCE_MODE should be set when a view of a normal tensor is created in
+ * InferenceMode.
+ * - DEFAULT is for all other cases. */
+@Namespace("torch::autograd") public enum CreationMeta {
+ DEFAULT((byte)(0)),
+ IN_CUSTOM_FUNCTION((byte)(1)),
+ MULTI_OUTPUT_NODE((byte)(2)),
+ NO_GRAD_MODE((byte)(3)),
+ INFERENCE_MODE((byte)(4));

+ public final byte value;
+ private CreationMeta(byte v) { this.value = v; }
+ private CreationMeta(CreationMeta e) { this.value = e.value; }
+ public CreationMeta intern() { for (CreationMeta e : values()) if (e.value == value) return e; return this; }
+ @Override public String toString() { return intern().name(); }
+}

+/** Handles correctly propagating CreationMeta when a new view is created from a
+ * previous view. In general, we don't want the new view to be _less_
+ * restrictive than the previous view (it's okay to be _more_ restrictive). A
+ * CreationMeta value of DEFAULT is currently the least restrictive, as the
+ * behavior for all other CreationMeta values is to error out for in-place ops.
+ * A CreationMeta value of INFERENCE_MODE is currently the most restrictive, so
+ * it takes precedence in propagation. If this changes, the logic here will
+ * need to be updated to properly handle the new semantics. */
+@Namespace("torch::autograd") public static native CreationMeta propagate_creation_meta(
+ CreationMeta prev_view_creation_meta,
+ CreationMeta new_view_creation_meta);
+@Namespace("torch::autograd") public static native @Cast("torch::autograd::CreationMeta") byte propagate_creation_meta(
+ @Cast("torch::autograd::CreationMeta") byte prev_view_creation_meta,
+ @Cast("torch::autograd::CreationMeta") byte new_view_creation_meta);
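As an editorial aside, the precedence rule documented above can be sketched with the enum and overload just declared. This is hypothetical usage; the expected values in the comments follow the documented semantics, not a verified run.

```java
import org.bytedeco.pytorch.global.torch.CreationMeta;
import static org.bytedeco.pytorch.global.torch.propagate_creation_meta;

public class CreationMetaSketch {
    public static void main(String[] args) {
        // A DEFAULT view created on top of a NO_GRAD_MODE view stays restricted:
        CreationMeta a = propagate_creation_meta(CreationMeta.NO_GRAD_MODE, CreationMeta.DEFAULT);
        // INFERENCE_MODE takes precedence over any new creation meta:
        CreationMeta b = propagate_creation_meta(CreationMeta.INFERENCE_MODE, CreationMeta.NO_GRAD_MODE);
        System.out.println(a + " " + b); // expected: NO_GRAD_MODE INFERENCE_MODE
    }
}
```

-// Parsed from ATen/ops/_pad_enum.h

+/** Unified function to handle error checking when a rebase happens.
+ * indirect=true means that the caller is not doing the inplace, but the
+ * inplace happened somewhere else. */
+@Namespace("torch::autograd") public static native void handle_view_on_rebase(
+ DifferentiableViewMeta diff_view_meta,
+ @Cast("bool") boolean indirect/*=false*/);
+@Namespace("torch::autograd") public static native void handle_view_on_rebase(
+ DifferentiableViewMeta diff_view_meta);
+// Targeting ../DifferentiableViewMeta.java

-// #pragma once
-// @generated by torchgen/gen.py from Function.h
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include

+//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// Variable Implementation
+//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// Factory Functions
+//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+/** Creates a {@code Variable} that is a *view* of another (*base*) variable.
+ * The {@code gradient_edge} is an optional (gradient_function, input_number) pair.
+ * {@code is_differentiable} is a bool that specifies whether this view is
+ * differentiable, i.e., whether the relation should be tracked by autograd.
+ * See NOTE [ Autograd View Variables ] for details.
+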

+ * NOTE: {@code allow_tensor_metadata_change} is set to true by default, because + * there are a lot of call sites to these factory functions that need to change + * the variable's size or storage afterwards, and they don't expect the + * original tensor (where the variable is created from) to be updated. Setting + * {@code allow_tensor_metadata_change_} to false by default would unnecessarily + * prevent those changes from happening and is undesirable. */ -// #include +// See NOTE [ Autograd View Variables ] for details. +// Differentiable view. Track history with DifferentiableViewMeta. +@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_differentiable_view( + @Const @ByRef Tensor data, + @ByVal @Cast("c10::optional*") Pointer backward_info, + @ByVal @Cast("c10::optional*") Pointer forward_info, + @Cast("bool") boolean shared_view_info, + CreationMeta creation_meta, + @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); +@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_differentiable_view( + @Const @ByRef Tensor data, + @ByVal @Cast("c10::optional*") Pointer backward_info, + @ByVal @Cast("c10::optional*") Pointer forward_info, + @Cast("bool") boolean shared_view_info, + CreationMeta creation_meta); +@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_differentiable_view( + @Const @ByRef Tensor data, + @ByVal @Cast("c10::optional*") Pointer backward_info, + @ByVal @Cast("c10::optional*") Pointer forward_info, + @Cast("bool") boolean shared_view_info, + @Cast("torch::autograd::CreationMeta") byte creation_meta, + @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); +@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_differentiable_view( + @Const @ByRef Tensor data, + @ByVal @Cast("c10::optional*") Pointer backward_info, + @ByVal @Cast("c10::optional*") Pointer forward_info, + @Cast("bool") boolean shared_view_info, + @Cast("torch::autograd::CreationMeta") byte creation_meta); +// See NOTE [ Autograd View Variables ] for details. +// Non-differentiable view. Just share version counter. -// aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? 
value=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _pad_enum(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad, @Cast("int64_t") long mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); -@Namespace("at") public static native @ByVal Tensor _pad_enum(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad, @Cast("int64_t") long mode); -@Namespace("at") public static native @ByVal Tensor _pad_enum(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Cast("int64_t") long mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); -@Namespace("at") public static native @ByVal Tensor _pad_enum(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Cast("int64_t") long mode); +/// +@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_non_differentiable_view( + @ByVal @Cast("torch::autograd::Variable*") Tensor base, + @Const @ByRef Tensor data, + @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); +@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_non_differentiable_view( + @ByVal @Cast("torch::autograd::Variable*") Tensor base, + @Const @ByRef Tensor data); +/** Creates a {@code Variable} from the given {@code Tensor}, copying its underlying + * {@code TensorImpl}. {@code requires_grad} should be set only for leaves, and determines + * whether the {@code Variable} will accumulate gradients. NOTE: {@code data} must *not* be + * a {@code Variable} already. Its dynamic type *must* be {@code Tensor}. + * + * TODO: Eliminate this function as much as possible, as it can be expressed + * more clearly as detach() or a no-op in most call sites (especially when + * there is only one use of the variable). */ +@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable( + @ByVal Tensor data, + @Cast("bool") boolean requires_grad/*=false*/, + @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); +@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable( + @ByVal Tensor data); -// aten::_pad_enum(Tensor self, SymInt[] pad, int mode, float? value=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _pad_enum_symint(@Const @ByRef Tensor self, @ByVal SymIntRef pad, @Cast("int64_t") long mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); -@Namespace("at") public static native @ByVal Tensor _pad_enum_symint(@Const @ByRef Tensor self, @ByVal SymIntRef pad, @Cast("int64_t") long mode); +/** Creates a {@code Variable} from the given {@code Tensor}, copying its underlying + * {@code TensorImpl}. {@code gradient_edge} should be a (function, input_nr) pair + * specifying the function in the autograd graph, and what particular input of + * that function, this variable is connected to. 
 */
+@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable(
+ @ByVal Tensor data,
+ @ByVal Edge gradient_edge,
+ @Cast("bool") boolean allow_tensor_metadata_change/*=true*/);
+@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable(
+ @ByVal Tensor data,
+ @ByVal Edge gradient_edge);

+@Namespace("torch::autograd::utils") public static native @Cast("bool") boolean has_same_meta(@Cast("const torch::autograd::Variable*") @ByRef Tensor base, @Cast("const torch::autograd::Variable*") @ByRef Tensor other);
+ // namespace utils
+ // namespace autograd
+ // namespace torch

+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

-// Parsed from ATen/ops/_pad_packed_sequence.h

+// Parsed from torch/csrc/autograd/autograd.h

// #pragma once

-// @generated by torchgen/gen.py from Function.h
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-
-
-// #include
-
-
-// aten::_pack_padded_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
-@Namespace("at") public static native @ByVal TensorTensorTuple _pack_padded_sequence(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Scalar padding_value, @Cast("int64_t") long total_length);
-
+// #include

+/** Computes the sum of gradients of given tensors with respect to graph leaves.
+ *
+ * The graph is differentiated using the chain rule. If any of {@code tensors}
+ * are non-scalar (i.e. their data has more than one element) and require
+ * gradient, then the Jacobian-vector product would be computed; in this case
+ * the function additionally requires specifying {@code grad_tensors}. It should be a
+ * sequence of matching length that contains the "vector" in the
+ * Jacobian-vector product, usually the gradient of the differentiated function
+ * w.r.t. corresponding tensors
+ * ({@code torch::Tensor()} is an acceptable value for all tensors that don't need
+ * gradient tensors).
+ *
+ * This function accumulates gradients in the leaves - you might need to zero
+ * them before calling it.
+ *
+ * @param tensors Tensors of which the derivative will be computed.
+ * @param grad_tensors The "vector" in the Jacobian-vector product, usually
+ * gradients
+ * w.r.t. each element of corresponding tensors. {@code torch::Tensor()} values
+ * can be specified for scalar Tensors or ones that don't require grad. If
+ * a {@code torch::Tensor()} value would be acceptable for all grad_tensors, then
+ * this argument is optional.
+ * @param retain_graph If {@code false}, the graph used to compute the grad will be
+ * freed.
+ * Note that in nearly all cases setting this option to {@code true} is not
+ * needed and often can be worked around in a much more efficient way.
+ * Defaults to the value of {@code create_graph}.
+ * @param create_graph If {@code true}, graph of the derivative will be constructed,
+ * allowing
+ * computation of higher order derivative products. Defaults to {@code false}.
+ * @param inputs Inputs w.r.t. which the gradient will be accumulated into
+ * {@code at::Tensor::grad}. All other Tensors will be ignored. If not provided,
+ * the gradient is accumulated into all the leaf Tensors that were used to
+ * compute {@code tensors}.
 */ +// When inputs are provided and a given input is not a leaf, +// the current implementation will call its grad_fn (even though it is not +// strictly needed to get these gradients). It is an implementation detail +// on which the user should not rely. See +// https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for +// more details. +/// +/// +@Namespace("torch::autograd") public static native void backward( + @Cast({"", "std::vector"}) @StdMove TensorVector tensors, + @Cast({"", "std::vector"}) @StdMove TensorVector grad_tensors/*={}*/, + @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, + @Cast("bool") boolean create_graph/*=false*/, + @Cast({"", "std::vector"}) @StdMove TensorVector inputs/*={}*/); +@Namespace("torch::autograd") public static native void backward( + @Cast({"", "std::vector"}) @StdMove TensorVector tensors); -// Parsed from ATen/ops/_pdist_backward.h +/** Computes and returns the sum of gradients of outputs with respect to the + * inputs. + * + * {@code grad_outputs} should be a sequence of length matching {@code output} + * containing the "vector" in the Jacobian-vector product, usually the pre-computed + * gradients w.r.t. each of the outputs. If an output doesn't require_grad, + * then the gradient can be {@code torch::Tensor()}. + * + * @param outputs Outputs of the differentiated function. + * @param inputs Inputs w.r.t. which the gradient will be + * returned (and not accumulated into {@code at::Tensor::grad}). + * @param grad_outputs The "vector" in the Jacobian-vector product. + * Usually gradients w.r.t. each output. {@code torch::Tensor()} values can be + * specified for scalar Tensors or ones that don't require grad. If a + * {@code torch::Tensor()} value would be acceptable for all grad_tensors, then + * this argument is optional. Default: {@code {}}. + * @param retain_graph If {@code false}, the graph used to compute the grad + * will be freed. Note that in nearly all cases setting this option to + * {@code true} is not needed and often can be worked around in a much more + * efficient way. Defaults to the value of {@code create_graph}. + * @param create_graph If {@code true}, graph of the derivative will + * be constructed, allowing to compute higher order derivative products. + * Default: {@code false}. + * @param allow_unused If {@code false}, specifying inputs that were not + * used when computing outputs (and therefore their grad is always zero) + * is an error. Defaults to {@code false}. */ +@Namespace("torch::autograd") public static native @Cast({"", "std::vector"}) @StdMove TensorVector grad( + @Cast({"", "std::vector"}) @StdMove TensorVector outputs, + @Cast({"", "std::vector"}) @StdMove TensorVector inputs, + @Cast({"", "std::vector"}) @StdMove TensorVector grad_outputs/*={}*/, + @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, + @Cast("bool") boolean create_graph/*=false*/, + @Cast("bool") boolean allow_unused/*=false*/); +@Namespace("torch::autograd") public static native @Cast({"", "std::vector"}) @StdMove TensorVector grad( + @Cast({"", "std::vector"}) @StdMove TensorVector outputs, + @Cast({"", "std::vector"}) @StdMove TensorVector inputs); -// #pragma once +/** Creates a new dual level and returns its index. This level index should then + * be used to call into the other functions below. This API supports entering a + * new level before the previous one is exited.
We call them nested forward AD + * levels. These can be used to compute higher order derivatives. */ +@Namespace("torch::autograd::forward_ad") public static native @Cast("uint64_t") long enter_dual_level(); -// @generated by torchgen/gen.py from Function.h +/** Exits the given level. This will clear up all the gradients from this level + * and all dual Tensors that had gradients for this level will become regular + * Tensors again. This function can only be used to exit the innermost nesting + * level and so exiting must happen in reverse order compared to the entering + * that was done with the function above. */ +@Namespace("torch::autograd::forward_ad") public static native void exit_dual_level(@Cast("uint64_t") long level); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace forward_ad + // namespace autograd + // namespace torch +// Parsed from ATen/core/alias_info.h -// #include +// #pragma once +// #include +// #include +// #include +// #include +// #include +// Targeting ../AliasInfo.java -// aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor -@Namespace("at") public static native @ByVal Tensor _pdist_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, double p, @Const @ByRef Tensor pdist); -// aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _pdist_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor self, double p, @Const @ByRef Tensor pdist); -// aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _pdist_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, double p, @Const @ByRef Tensor pdist, @ByRef Tensor out); +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef AliasInfo lhs, @Const @ByRef AliasInfo rhs); +// this does match the way things are represented in the schema +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef AliasInfo aliasInfo); + // namespace c10 -// Parsed from ATen/ops/_pdist_forward.h +// Parsed from ATen/core/operator_name.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include // #include +// #include +// #include +// #include +// #include +// Targeting ../OperatorName.java -// #include +// Non-owning view of an OperatorName. 
Unlike OperatorName, most of +// its functions are constexpr, so it can be used for compile time +// computations +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef OperatorName lhs, @Const @ByRef OperatorName rhs); -// aten::_pdist_forward(Tensor self, float p=2) -> Tensor -@Namespace("at") public static native @ByVal Tensor _pdist_forward(@Const @ByRef Tensor self, double p/*=2*/); -@Namespace("at") public static native @ByVal Tensor _pdist_forward(@Const @ByRef Tensor self); +@Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef OperatorName lhs, @Const @ByRef OperatorName rhs); -// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _pdist_forward_out(@ByRef Tensor out, @Const @ByRef Tensor self, double p/*=2*/); -@Namespace("at") public static native @ByRef Tensor _pdist_forward_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _pdist_forward_outf(@Const @ByRef Tensor self, double p, @ByRef Tensor out); +@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef OperatorName opName); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer arg0, @Const @ByRef OperatorName arg1); + // namespace c10 -// Parsed from ATen/ops/_pin_memory.h +// Parsed from ATen/core/dispatch/OperatorOptions.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - +// #include -// aten::_pin_memory(Tensor self, Device? device=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _pin_memory(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); -@Namespace("at") public static native @ByVal Tensor _pin_memory(@Const @ByRef Tensor self); +@Namespace("c10") public enum AliasAnalysisKind { + INTERNAL_SPECIAL_CASE((byte)(0)), + CONSERVATIVE((byte)(1)), // The most conservative alias analysis type, assumes + // side-effects. This is the default analysis. + FROM_SCHEMA((byte)(2)), + PURE_FUNCTION((byte)(3)); -// aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _pin_memory_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); -@Namespace("at") public static native @ByRef Tensor _pin_memory_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _pin_memory_outf(@Const @ByRef Tensor self, @ByVal DeviceOptional device, @ByRef Tensor out); + public final byte value; + private AliasAnalysisKind(byte v) { this.value = v; } + private AliasAnalysisKind(AliasAnalysisKind e) { this.value = e.value; } + public AliasAnalysisKind intern() { for (AliasAnalysisKind e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// #if !defined(_MSC_VER) +@Namespace("c10") public static native @Cast("const char*") BytePointer toString(AliasAnalysisKind aliasAnalysisKind); + // namespace c10 -// Parsed from ATen/ops/_prelu_kernel.h +// Parsed from ATen/core/function_schema.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// schema as used in the compiler for resolving function calls and reporting +// errors. These objects should be constructed from C10 schema once those +// are available. +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef Argument lhs, @Const @ByRef Argument rhs); +// Targeting ../Argument.java -// #include -// aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor -@Namespace("at") public static native @ByVal Tensor _prelu_kernel(@Const @ByRef Tensor self, @Const @ByRef Tensor weight); +@Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef Argument lhs, @Const @ByRef Argument rhs); +@Namespace("c10") public enum SchemaArgType { input(0), output(1); + public final int value; + private SchemaArgType(int v) { this.value = v; } + private SchemaArgType(SchemaArgType e) { this.value = e.value; } + public SchemaArgType intern() { for (SchemaArgType e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../SchemaArgument.java -// Parsed from ATen/ops/_prelu_kernel_backward.h -// #pragma once +@Namespace("c10") public static native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef FunctionSchema lhs, @Const @ByRef FunctionSchema rhs); +// Targeting ../FunctionSchema.java -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("c10") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef FunctionSchema lhs, @Const @ByRef FunctionSchema rhs); +// print out Argument, which is compatible with FunctionSchema parser +// full format: Type(alias)? 
name=default_value +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Argument arg); -// #include +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef FunctionSchema schema); +@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef FunctionSchema schema); -// aten::_prelu_kernel_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _prelu_kernel_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight); + // namespace c10 + // namespace std +// #include // IWYU pragma: keep -// Parsed from ATen/ops/_remove_batch_dim.h +// Parsed from ATen/core/function_schema_inl.h // #pragma once +// #include -// @generated by torchgen/gen.py from Function.h +// note: windows build doesn't find symbols in operator files unless +// this is a header file -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("c10") public static native @Cast("size_t") long findFirstOutArg(@StdVector Argument args); -// #include -// aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor _remove_batch_dim(@Const @ByRef Tensor self, @Cast("int64_t") long level, @Cast("int64_t") long batch_size, @Cast("int64_t") long out_dim); -// Parsed from ATen/ops/_reshape_alias.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor _reshape_alias(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor _reshape_alias(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor _reshape_alias_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride); +// covariant subtyping of list of Arguments +@Namespace("c10") public static native @Cast("bool") boolean isSubtypeOfList( + @ByVal ArgumentArrayRef child, + @ByVal ArgumentArrayRef parent, + @Cast("std::ostream*") Pointer why_not); + // namespace c10 -// Parsed from ATen/ops/_reshape_alias_copy.h +// Parsed from ATen/core/op_registration/infer_schema.h // #pragma once -// @generated by torchgen/gen.py from Function.h +/** + * This file contains functionality to take a C++ function and infer its + * c10::FunctionSchema. 
 + */ -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include +// Targeting ../ArgumentDef.java -// #include +/** Checks the static C++ types {@code Types} for correctness to catch common error cases. */ +/** Creates a vector of {@code ArgumentDef} from a list of C++ types that are specified + * as template arguments. */ -// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor -@Namespace("at") public static native @ByVal Tensor _reshape_alias_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor _reshape_alias_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +/** Creates a vector of {@code ArgumentDef} from a list of C++ types that are specified + * as a tuple (i.e. in the way c10 kernels return values). + * It can be a tuple if there are three output arguments with types A, B, C. + * It can be an empty tuple<>, or void for kernels that don't return anything. + * It can be a single type A (i.e. no tuple) for the case where a kernel just + * returns one value. */ -// aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor -@Namespace("at") public static native @ByVal Tensor _reshape_alias_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride); +@Namespace("c10::detail::infer_schema") public static native @ByVal FunctionSchema make_function_schema(@ByVal ArgumentDefArrayRef arguments, @ByVal ArgumentDefArrayRef returns); +/** Creates a {@code FunctionSchema} object from a {@code FunctionTraits} type for a + * function. Flattens std::tuple returns into multiple return types */ -// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _reshape_alias_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor _reshape_alias_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +/** Creates a {@code FunctionSchema} object from a {@code FunctionTraits} type for a + * function. Preserves std::tuple returns as a Tuple return type */ -// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _reshape_alias_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _reshape_alias_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); -// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor _reshape_alias_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride); +@Namespace("c10") public static native @ByVal StringOptional findSchemaDifferences(@Const @ByRef FunctionSchema inferred, @Const @ByRef FunctionSchema specified); -// aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _reshape_alias_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByRef Tensor out); +// Parsed from ATen/record_function.h +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include -// Parsed from ATen/ops/_reshape_copy.h +// #include +// #include +// #include +// #include -// #pragma once -// @generated by torchgen/gen.py from Function.h +// Kind of record function scope; +@Namespace("at") public enum RecordScope { + // c10/ATen ops, autograd nodes + FUNCTION((byte)(0)), + // Functions/nodes called from the autograd + BACKWARD_FUNCTION((byte)(1)), + // TorchScript functions, methods + TORCHSCRIPT_FUNCTION((byte)(2)), + // Kernel Function dtype Tag + KERNEL_FUNCTION_DTYPE((byte)(3)), + // Torchbind custom class, + CUSTOM_CLASS((byte)(4)), + // Generic Build Feature + BUILD_FEATURE((byte)(5)), + // Kernel Function dtype Tag + LITE_INTERPRETER((byte)(6)), + // User defined scope (e.g. with record_function()) + USER_SCOPE((byte)(7)), + // Scopes for static runtime, a specialized TorchScript interpreter + STATIC_RUNTIME_OP((byte)(8)), + STATIC_RUNTIME_MODEL((byte)(9)), + NUM_SCOPES((byte)(10));// must be the last in the list -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + public final byte value; + private RecordScope(byte v) { this.value = v; } + private RecordScope(RecordScope e) { this.value = e.value; } + public RecordScope intern() { for (RecordScope e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + + // namespace at +// Targeting ../StringView.java -// #include +// Soft limit on the number of callbacks to use; +@Namespace("at") @MemberGetter public static native @Cast("const std::size_t") long kSoftLimitCallbacks(); -// aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor -@Namespace("at") public static native @ByVal Tensor _reshape_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _reshape_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// An abstract base class for various observer contexts that can be attached to +// the RecordFunction. +// +// PyTorch callbacks/observers API: +// -// aten::_reshape_copy(Tensor self, SymInt[] size) -> Tensor -@Namespace("at") public static native @ByVal Tensor _reshape_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size); +/** + * RecordFunctionCallback represents a pair of callbacks to be used with + * RecordFunction, members: + * start, end - the callbacks to run when entering and exiting the scope; + * optionally, the start callback may return an ObserverContext which will + * be passed to the end callback, use appropriate constructor accordingly. 
 + * needs_inputs - whether the callbacks need the inputs passed from the + * observed function/range; NOTE: passing the inputs incurs an additional + * overhead; + * sampling_probability - if not 1.0, then the callback is + * probabilistically sampled to run; NOTE: start and end callbacks always run as + * a pair and are sampled together; + * scopes - types of scopes to execute the + * callbacks on (see RecordScope); passing an empty set means the callbacks will be + * executed for all possible scope types; + * should_run - optional function that + * returns whether this callback should run; overwrites the effect of setting + * sampling_probability + */ +// Notes: +// - two types of callbacks are provided: thread local and global +// - thread local callbacks are added/removed only for the given thread +// and are stored locally for each thread and separately from the list +// of the global callbacks +// - global callbacks are stored in a single per process list and are +// invoked by every RecordFunction, in addition to the thread local +// callbacks specific to the given thread +// - we allow the added callbacks to be sampled, by specifying a sampling +// probability for each callback pair; if the start callback is +// not picked to run, the corresponding end callback won't be called +// - a typical use case for the global callbacks is passive monitoring +// in the background (e.g. fleet-wide monitoring), without focusing on +// the specific piece of code +// - in contrast, thread local callbacks are enabled locally, on demand, +// for the specific piece of code (range) and are not sampled +// - a typical use case for thread local callbacks is profiler and code +// execution tracer +// - note, thread local callbacks are automatically propagated with +// ThreadLocalState across JIT continuations and async tasks (at::launch) +@Namespace("at") @MemberGetter public static native @Cast("const at::CallbackHandle") long INVALID_CALLBACK_HANDLE(); +// Targeting ../RecordFunctionCallbacksEntry.java +// Holds pairs (callbacks, unique_id) +// Targeting ../RecordFunction.java +@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(RecordScope scope); +@Namespace("at") public static native @ByVal @Cast("at::StepCallbacks*") Pointer getStepCallbacks(@Cast("at::RecordScope") byte scope); +@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( + RecordScope scope); +@Namespace("at") public static native @ByVal @Cast("c10::optional*") Pointer getStepCallbacksUnlessEmpty( + @Cast("at::RecordScope") byte scope); + // namespace detail +// optional argument - function's seq_no +// #define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) +// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// ::at::detail::record_function_with_scope( +// guard, fn, inputs, ##__VA_ARGS__); +// } +// #define RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( +// scope, fn, inputs, outputs, ...)
+// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// if (guard.needsInputs()) { +// guard.before(fn, inputs, ##__VA_ARGS__); +// } else { +// guard.before(fn, ##__VA_ARGS__); +// } +// if (guard.needsOutputs()) { +// guard.setOutputs(outputs); +// } +// } -// aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor -@Namespace("at") public static native @ByVal Tensor _reshape_from_tensor(@Const @ByRef Tensor self, @Const @ByRef Tensor shape); +// #define RECORD_FUNCTION(fn, inputs, ...) +// RECORD_FUNCTION_WITH_SCOPE( +// at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__) +// #define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) +// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs) +// #define RECORD_FUNCTION_WITH_INPUTS_OUTPUTS(fn, inputs, outputs, ...) +// RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( +// at::RecordScope::FUNCTION, fn, inputs, outputs, ##__VA_ARGS__) +// Custom user scopes in C++; similar to Python's 'with record_function("..."):' +// #define RECORD_USER_SCOPE(fn) +// RECORD_FUNCTION_WITH_SCOPE( +// at::RecordScope::USER_SCOPE, fn, c10::ArrayRef{}) -// Parsed from ATen/ops/_resize_output.h +// RECORD_USER_SCOPE with inputs +// #define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) +// RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs) -// #pragma once +// Helper macro to pass in debug handle that is used to +// post process events +// #define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( +// scope, fn, debug_handle, inputs, ...) +// at::RecordFunction guard(scope); +// if (guard.isActive()) { +// ::at::detail::record_function_with_scope_and_debug_handle( +// guard, fn, debug_handle, inputs, ##__VA_ARGS__); +// } -// @generated by torchgen/gen.py from Function.h +// Helper macros to record LITE INTERPRETER scope events with debug handles +// #define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( +// fn, debug_handle, inputs) +// RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( +// at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs) -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Bookend to the RECORD_FUNCTION macros. Use this after the kernel +// launch to let the profiler bind the outputs to the op that produced +// them.
Note that guard is declared by RECORD_FUNCTION so this macro +// needs to be called from the same scope as RECORD_FUNCTION +// #define RECORD_OUTPUTS(outputs) +// if (guard.needsOutputs()) { +// guard.setOutputs( +// std::vector(outputs.begin(), outputs.end())); +// } + +/** + * addThreadLocalCallback adds a thread local callback to run with + * RecordFunction; returns a handle to use with removeThreadLocalCallback + */ +@Namespace("at") public static native @Cast("at::CallbackHandle") long addThreadLocalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); +/** + * hasThreadLocalCallbacks returns whether there are callbacks registered + * with addThreadLocalCallback + */ +@Namespace("at") public static native @Cast("bool") boolean hasThreadLocalCallbacks(); +/** + * clearThreadLocalCallbacks removes all thread local callbacks + */ +@Namespace("at") public static native void clearThreadLocalCallbacks(); -// #include +/** + * addGlobalCallback adds a global callback to run with RecordFunction; + * it may only be called during the program initialization + */ +@Namespace("at") public static native @Cast("at::CallbackHandle") long addGlobalCallback(@ByVal @Cast("at::RecordFunctionCallback*") Pointer cb); +/** + * removeCallback removes a callback given the handle returned by + * addThreadLocalCallback or addGlobalCallback; + * it may only be called while no other code runs simultaneously + */ +@Namespace("at") public static native void removeCallback(@Cast("at::CallbackHandle") long handle); -// aten::_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor _resize_output_(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal Device device); -@Namespace("at") public static native @Const @ByRef Tensor _resize_output_(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal Device device); +/** + * Prevent the given callback from executing. If handle is invalid, + * does nothing. + */ +@Namespace("at") public static native void disableCallback(@Cast("at::CallbackHandle") long handle); -// aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor _resize_output_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal Device device); -@Namespace("at") public static native @Const @ByRef Tensor _resize_output_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal Device device); -// aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor _resize_output_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal Device device, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor _resize_output_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal Device device, @Const @ByRef Tensor out); +/** + * Allow the given callback, previously disabled with disableCallback, to + * execute again. If handle is invalid, does nothing.
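+ *
+ * <p>Typical handle lifecycle, as a sketch ({@code cb} is a hypothetical
+ * {@code at::RecordFunctionCallback} built on the native side and passed
+ * here as a {@code Pointer}):
+ * <pre>{@code
+ * long handle = torch.addGlobalCallback(cb); // register
+ * torch.disableCallback(handle);             // temporarily mute it
+ * torch.reenableCallback(handle);            // allow it to run again
+ * torch.removeCallback(handle);              // unregister for good
+ * }</pre>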
 + */ +@Namespace("at") public static native void reenableCallback(@Cast("at::CallbackHandle") long handle); -// aten::_resize_output(Tensor self, int[] size, Device device) -> Tensor -@Namespace("at") public static native @ByVal Tensor _resize_output(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal Device device); -@Namespace("at") public static native @ByVal Tensor _resize_output(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal Device device); +/** + * hasGlobalCallbacks returns whether there are global callbacks + * registered with addGlobalCallback + */ +@Namespace("at") public static native @Cast("bool") boolean hasGlobalCallbacks(); +/** + * clearGlobalCallbacks removes all global callbacks + */ +@Namespace("at") public static native void clearGlobalCallbacks(); +// for both thread local and global callbacks +@Namespace("at") public static native @Cast("bool") boolean hasCallbacks(); +@Namespace("at") public static native void clearCallbacks(); +/** + * enableRecordFunction enables RecordFunction thread locally + */ +@Namespace("at") public static native void enableRecordFunction(@Cast("bool") boolean enable/*=true*/); +@Namespace("at") public static native void enableRecordFunction(); -// Parsed from ATen/ops/_rowwise_prune.h +/** + * isRecordFunctionEnabled returns whether RecordFunction + * is enabled thread locally + */ +@Namespace("at") public static native @Cast("bool") boolean isRecordFunctionEnabled(); +// Targeting ../RecordFunctionGuard.java -// #pragma once -// @generated by torchgen/gen.py from Function.h +// Targeting ../DisableRecordFunctionGuard.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../RecordFunctionTLS.java -// #include +@Namespace("at") public static native @Const @ByRef RecordFunctionTLS get_record_function_tls_(); -// aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _rowwise_prune(@Const @ByRef Tensor weight, @Const @ByRef Tensor mask, ScalarType compressed_indices_dtype); +@Namespace("at") public static native void set_record_function_tls_(@Const @ByRef RecordFunctionTLS tls); +@Namespace("at") public static native void set_record_function_seed_for_testing(@Cast("uint32_t") int seed); + // namespace at -// Parsed from ATen/ops/_sample_dirichlet.h +// Parsed from ATen/core/op_registration/op_allowlist.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// TODO: unify to C10_MOBILE. In theory this header could be used in OSS. +// #ifdef TEMPLATE_SELECTIVE_BUILD +// #include +// #endif -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +/** + * This header implements functionality to build PyTorch with only a certain + * set of operators (+ dependencies) included. + * + * - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these + * two ops will be included in your build. The allowlist records operators + * only, no overloads; if you include aten::add, all overloads of aten::add + * will be included.
+ * + * Internally, this is done by removing the operator registration calls + * using compile time programming, and the linker will then prune all + * operator functions that weren't registered. + * See Note [Selective build] for more details + * + * WARNING: The allowlist mechanism doesn't work for all ways you could go about + * registering an operator. If the dispatch key / operator name is not + * sufficiently obvious at compile time, then the allowlisting mechanism + * will fail (and the operator will be included in the binary anyway). + */ +// #include +// #include +// #include -// #include +// #if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) +// #include +// #endif +@Namespace("c10::impl") public static native @Cast("const bool") boolean allowlist_contains(@ByVal @Cast("c10::string_view*") Pointer allowlist, @ByVal @Cast("c10::string_view*") Pointer item); // Forward Declare -// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sample_dirichlet(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor _sample_dirichlet(@Const @ByRef Tensor self); +/** + * In selective build mode returns true/false depending on whether a build + * feature is available or not. + * + * In instrumenting mode (tracing mode), always returns true, and doesn't + * trigger any side effects. + */ +@Namespace("c10::impl") public static native @Cast("const bool") boolean is_build_feature_available(@Cast("const char*") BytePointer name); +@Namespace("c10::impl") public static native @Cast("const bool") boolean is_build_feature_available(String name); -// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sample_dirichlet_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor _sample_dirichlet_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sample_dirichlet_outf(@Const @ByRef Tensor self, @ByVal GeneratorOptional generator, @ByRef Tensor out); +/** + * Use BUILD_FEATURE_REQUIRED macro in user-code. + * + * In selective build mode becomes a no-op if the build feature passed + * in is available. If not available, throws an exception (c10::Error). + * The compiler is able to perform dead code elimination for code + * following this method if the build feature is not available. + * + * In instrumenting mode (tracing mode), registers (as a side effect) + * the presence of this specific build feature being triggered. 
+ */ +// #if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode +// #if defined(TORCH_BUILD_FEATURE_ALLOWLIST) +// #define BUILD_FEATURE_REQUIRED(NAME) +// if (!c10::impl::is_build_feature_available(NAME)) { +// ::c10::impl::build_feature_required_feature_not_available(NAME); +// } +// #else // Everything trivially selected +// #define BUILD_FEATURE_REQUIRED(NAME) -// Parsed from ATen/ops/_saturate_weight_to_fp16.h +// #endif -// #pragma once +// #else // trace mode +// #define BUILD_FEATURE_REQUIRED(NAME) +// RECORD_FUNCTION_WITH_SCOPE( +// at::RecordScope::BUILD_FEATURE, +// std::string(NAME), +// {}); +// #endif -// @generated by torchgen/gen.py from Function.h +// Use this macro, and not is_build_feature_available +// #define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME) -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// returns true iff allowlist contains item +// allowlist_contains("a;bc;d", "bc") == true +// Returns true iff the given op name is on the allowlist +// and should be registered +@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_check(@ByVal @Cast("c10::string_view*") Pointer op_name); +// Returns true iff the given schema string is on the allowlist +// and should be registered +@Namespace("c10::impl") public static native @Cast("const bool") boolean schema_allowlist_check(@ByVal @Cast("c10::string_view*") Pointer schema); -// #include +// Returns true iff the given custom class name is on the allowlist +// and should be registered +@Namespace("c10::impl") public static native @Cast("const bool") boolean custom_class_allowlist_check(@ByVal @Cast("c10::string_view*") Pointer custom_class_name); +// schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST. +// Add this API to pass arbitrary allowlist. +@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_contains_name_in_schema(@ByVal @Cast("c10::string_view*") Pointer allowlist, @ByVal @Cast("c10::string_view*") Pointer schema); -// aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor -@Namespace("at") public static native @ByVal Tensor _saturate_weight_to_fp16(@Const @ByRef Tensor weight); +// Returns true iff the given dispatch key is on the allowlist +// and should be registered. When we turn this on, the list of valid +// mobile dispatch keys is hard coded (but you need to make sure +// that you have the correct set of dispatch keys for this). +@Namespace("c10::impl") public static native @Cast("const bool") boolean dispatch_key_allowlist_check(DispatchKey arg0); +@Namespace("c10::impl") public static native @Cast("const bool") boolean dispatch_key_allowlist_check(@Cast("c10::DispatchKey") short arg0); + // namespace impl + // namespace c10 +// Parsed from c10/util/either.h -// Parsed from ATen/ops/_scaled_dot_product_attention.h +// Originally taken from +// https://github.com/cryfs/cryfs/blob/14ad22570ddacef22d5ff139cdff68a54fc8234d/src/cpp-utils/either.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include // #include +/** + * either is a tagged union that holds either an object of type A + * or an object of type B. 
 + */ + // namespace c10 +// Parsed from torch/csrc/jit/frontend/function_schema_parser.h // #pragma once +// #include +// #include +// #include +// #include +@Namespace("torch::jit") public static native @ByVal FunctionSchema parseSchema(@StdString BytePointer schema); +@Namespace("torch::jit") public static native @ByVal FunctionSchema parseSchema(@StdString String schema); +@Namespace("torch::jit") public static native @ByVal OperatorName parseName(@StdString BytePointer name); +@Namespace("torch::jit") public static native @ByVal OperatorName parseName(@StdString String name); + // namespace jit + // namespace torch -// Parsed from ATen/ops/_scaled_dot_product_attention_math.h +// Parsed from c10/core/CompileTimeFunctionPointer.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// #include +/** + * Represent a function pointer as a C++ type. + * This allows using the function pointer as a type in a template; + * calling it from inside the template allows the compiler to inline + * the call because it knows the function pointer at compile time. + * + * Example 1: + * int add(int a, int b) {return a + b;} + * using Add = TORCH_FN_TYPE(add); + * template<class Func> struct Executor { + * int execute(int a, int b) { + * return Func::func_ptr()(a, b); + * } + * }; + * Executor<Add> executor; + * EXPECT_EQ(3, executor.execute(1, 2)); + * + * Example 2: + * int add(int a, int b) {return a + b;} + * template<class Func> int execute(Func, int a, int b) { + * return Func::func_ptr()(a, b); + * } + * EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2)); + */ + // namespace c10 -// #include -// aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor?
dropout_mask=None) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _scaled_dot_product_attention_math(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional attn_mask, double dropout_p/*=0.0*/, @Cast("bool") boolean is_causal/*=false*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional dropout_mask); -@Namespace("at") public static native @ByVal TensorTensorTuple _scaled_dot_product_attention_math(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value); +// #define TORCH_FN_TYPE(func) +// ::c10::CompileTimeFunctionPointer< +// std::remove_pointer_t>, +// func> +// #define TORCH_FN(func) TORCH_FN_TYPE(func)() +// Parsed from ATen/core/boxing/OperatorKernel.h +// #pragma once +// #include +// Targeting ../OperatorKernel.java -// Parsed from ATen/ops/_scaled_dot_product_efficient_attention.h -// #pragma once -// @generated by torchgen/gen.py from Function.h + // namespace c10 -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from ATen/core/boxing/BoxedKernel.h +// #pragma once -// #include +// #include +// #include +// #include +// This kernel implements the behavior of falling through to the next available +// registered dispatch key. The implementation of this function is FAST; it is +// no overhead to fallthrough to the next key. See cpp file for some more +// implementation notes; notably, this does NOT actually go through the +// boxing/unboxing codepath. +@Namespace("c10") public static native void fallthrough_kernel(OperatorKernel arg0, @Const @ByRef OperatorHandle arg1, @ByVal DispatchKeySet arg2, @Cast("c10::Stack*") IValueVector arg3); + +// Note [Ambiguity in AutogradOther kernel] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// This error-reporting kernel is registered to the AutogradOther entry in the +// dispatch table when there is both a CompositeImplicitAutograd kernel and a +// backend kernel for ANY backend that maps to AutogradOther. To see why +// this is necessary in the AutogradOther case, it's helpful to first see +// why everything works out fine for a backend that has a reserved Autograd +// entry (see rule 2.2 in [Note] DispatchTable computation): +// +// CPU AutogradCPU +// reg? registers with... +// ------------------------------------------------- +// y Autograd registration takes precedence +// over CompositeImplicitAutograd. +// This is good, because the CPU specific backend +// implementation is more specialized and typically better; +// if we used the composite, we would bypass it. +// (NB: the Autograd key is guaranteed to exist because +// the autograd codegen requires it!) +// +// n CompositeImplicitAutograd takes precedence. +// This is also good, because the Autograd +// registration (if it exists) would try to redispatch +// to the (non-existent) CPU implementation; by +// using the composite, we ensure the operator +// actually works. +// +// As you can see, when we have a specific Autograd key (AutogradCPU), we can +// decide whether or not to use the CompositeImplicitAutograd kernel or the +// Autograd kernel based on whether or not the backend kernel exists. 
+// +// However, for AutogradOther (which is the catchall autograd kernel for +// everything that doesn't have a specific Autograd key), we can't do this +// trick because there isn't any unique backend to peek at to disambiguate; +// backends that do have implementations would prefer the Autograd kernel, +// while unimplemented backends would prefer CompositeImplicitAutograd. Rather +// than arbitrarily pick one or the other, we just register a kernel that raises +// an error and let the user decide how to proceed. +@Namespace("c10") public static native void ambiguous_autogradother_kernel(OperatorKernel arg0, @Const @ByRef OperatorHandle arg1, @ByVal DispatchKeySet arg2, @Cast("c10::Stack*") IValueVector arg3); + +// Note [named_not_supported_kernel] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// This kernel implements reporting an error message saying that named tensor is +// not supported. This kernel doesn't rely on the Stack, and so it is special +// cased in the dispatcher to be triggered before we attempt boxing (so we can +// give a good error message in cases when boxing is not supported). When +// boxing is universally supported this can be removed. +@Namespace("c10") public static native void named_not_supported_kernel(OperatorKernel arg0, @Const @ByRef OperatorHandle arg1, @ByVal DispatchKeySet arg2, @Cast("c10::Stack*") IValueVector arg3); +/** + * BoxedKernel is similar to a std::function storing a boxed kernel.
+ */ + // namespace c10 +// #include -// Parsed from ATen/ops/_scaled_dot_product_efficient_attention_backward.h +// Parsed from ATen/core/boxing/BoxedKernel_impl.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, bool is_causal=False, bool chunk_grad_outputs=False) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _scaled_dot_product_efficient_attention_backward(@Const @ByRef Tensor grad_out_, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef Tensor out, @Const @ByRef Tensor logsumexp, @Cast("bool") boolean is_causal/*=false*/, @Cast("bool") boolean chunk_grad_outputs/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _scaled_dot_product_efficient_attention_backward(@Const @ByRef Tensor grad_out_, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef Tensor out, @Const @ByRef Tensor logsumexp); -// Parsed from ATen/ops/_scaled_dot_product_flash_attention_backward.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal, int philox_seed, int philox_offset) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _scaled_dot_product_flash_attention_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef Tensor out, @Const @ByRef Tensor logsumexp, @Const @ByRef Tensor cum_seq_q, @Const @ByRef Tensor cum_seq_k, @Cast("int64_t") long max_q, @Cast("int64_t") long max_k, double dropout_p, @Cast("bool") boolean is_causal, @Cast("int64_t") long philox_seed, @Cast("int64_t") long philox_offset); -// Parsed from ATen/ops/_segment_reduce_backward.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? 
initial=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _segment_reduce_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor output, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); -@Namespace("at") public static native @ByVal Tensor _segment_reduce_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor output, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce); + // namespace c10 -// aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _segment_reduce_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor output, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); -@Namespace("at") public static native @ByRef Tensor _segment_reduce_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor output, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce); -// aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _segment_reduce_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor output, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef TensorOptional lengths, @Const @ByRef TensorOptional offsets, @Cast("int64_t") long axis, @Const @ByRef ScalarOptional initial, @ByRef Tensor out); +// Parsed from ATen/core/stack.h // #pragma once +// #include -// Parsed from ATen/ops/_shape_as_tensor.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include -// #include +// TODO move this to c10 namespace +// Targeting ../Operation.java -// aten::_shape_as_tensor(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _shape_as_tensor(@Const @ByRef Tensor self); +// An operation with N inputs and M outputs pops the last N inputs off +// the stack and pushes its M outputs onto the stack +// before: I0, I1, ... IN <- stack.back() +// after: O0, O1, ... OM +// operations are defined this way so that ownership of inputs can be +// transferred to the operation and it can incrementally drop ownership of +// tensors when they become unneeded.
For large operations, like 'run an entire +// subgraph', this functionality is very important for minimizing gpu memory +// usage. The return value is the relative 'offset' to jump to for the next +// operation: +// pc += 1 + offset +// so a return value of 0 goes to the next instruction +// treat the last N elements of the stack as a list, looking up +// element i +@Namespace("torch::jit") public static native @ByRef IValue peek(@ByRef IValueVector stack, @Cast("size_t") long i, @Cast("size_t") long N); +// treat the last N elements of the stack as a list, looking up the +// slice starting at index i and having length len +@Namespace("torch::jit") public static native @ByVal IValueArrayRef peekSlice( + @Const @ByRef IValueVector stack, + @Cast("size_t") long i, + @Cast("size_t") long len, + @Cast("size_t") long N); +@Namespace("torch::jit") public static native @ByVal IValueArrayRef last(@Const @ByRef IValueVector stack, @Cast("size_t") long N); +@Namespace("torch::jit") public static native void drop(@ByRef IValueVector stack, @Cast("size_t") long n); +@Namespace("torch::jit") public static native @ByVal IValue pop(@ByRef IValueVector stack); +@Namespace("torch::jit") public static native @ByVal IValueVector pop(@ByRef IValueVector stack, @Cast("size_t") long n); +// variadic pop: +// int64_t a; at::Tensor b; +// pop(stack, a, b); +// equivalent to: +// b = pop(stack).toTensor(); +// a = pop(stack).toInt(); +@Namespace("torch::jit") public static native void push_one(@ByRef IValueVector stack, @ByVal TensorOptions options); +// The packer here is carefully written not to make any unnecessary +// copies. +// pack takes the return values of aten functions and pushes them onto the stack + // namespace jit + // namespace torch -// Parsed from ATen/ops/_slow_conv2d_backward.h +// Parsed from ATen/core/boxing/impl/boxing.h // #pragma once +// This file contains boxing (not unboxing) logic, +// i.e. how to make a vector from a set of concrete arguments. -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include // #include -// #include -// #include +// #include +// #include +// +// utils +// -// #include +// is_mutable_tensor_ref -// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!)
grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _slow_conv2d_backward_out(@ByRef Tensor grad_input, @ByRef Tensor grad_weight, @ByRef Tensor grad_bias, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _slow_conv2d_backward_out(@ByRef Tensor grad_input, @ByRef Tensor grad_weight, @ByRef Tensor grad_bias, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _slow_conv2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor grad_input, @ByRef Tensor grad_weight, @ByRef Tensor grad_bias); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _slow_conv2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input, @ByRef Tensor grad_weight, @ByRef Tensor grad_bias); +// is_mutable_tensor_ref -// aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _slow_conv2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _slow_conv2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast("std::array*") BoolPointer output_mask); +// is_tuple_of_mutable_tensor_refs +// -// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, 
int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _slow_conv2d_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _slow_conv2d_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _slow_conv2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _slow_conv2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// has_ivalue_to tests the presence/absence of instance method IValue::to() +// +// +// boxing predicates +// +// A boxable arg type is one that IValue has a constructor for. +// an unboxable result is one that can be extracted from an IValue -// Parsed from ATen/ops/_slow_conv2d_forward.h +// +// boxArgs - utility for pushing unboxed args onto IValue stack +// -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// torch::jit::push pushes 4 values for a TensorOptions; this needs to +// be kept in sync. +// NOTE: this could probably be simplified with C++17 fold expressions. 
-// #include +@Namespace("c10::impl") public static native void boxToStack(@Cast("c10::impl::IValueAlignedStorage*") Pointer dest, @ByVal TensorOptions options, @ByRef IntPointer lastIdx); +@Namespace("c10::impl") public static native void boxToStack(@Cast("c10::impl::IValueAlignedStorage*") Pointer dest, @ByVal TensorOptions options, @ByRef IntBuffer lastIdx); +@Namespace("c10::impl") public static native void boxToStack(@Cast("c10::impl::IValueAlignedStorage*") Pointer dest, @ByVal TensorOptions options, @ByRef int[] lastIdx); +@Namespace("c10::impl") public static native void boxArgsToStack(@Cast("c10::impl::IValueAlignedStorage*") Pointer arg0, @ByRef IntPointer arg1); +@Namespace("c10::impl") public static native void boxArgsToStack(@Cast("c10::impl::IValueAlignedStorage*") Pointer arg0, @ByRef IntBuffer arg1); +@Namespace("c10::impl") public static native void boxArgsToStack(@Cast("c10::impl::IValueAlignedStorage*") Pointer arg0, @ByRef int[] arg1); -// aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _slow_conv2d_forward_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor _slow_conv2d_forward_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _slow_conv2d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor output); -@Namespace("at") public static native @ByRef Tensor _slow_conv2d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor output); +// +// PopResult is a helper class whose specializations handle popping single and +// multiple return values, respectively. +// -// aten::_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias, int[2] stride, int[2] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor _slow_conv2d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor _slow_conv2d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// +// BoxedKernelWrapper +// +// For a given function type FT, BoxedKernelWrapper implements +// a `call` method that +// - takes a boxed kernel and unboxed arguments as specified by FT, +// - calls `boxArgs` to box the arguments +// - calls the boxed kernel +// - unboxes and returns the result +// +// The partial specializations below handle various cases: in +// particular, not all types appearing in op signatures are supported, +// and ops returning references have nonstandard wrapper implementations. +// +// 1. The base specialization of BoxedKernelWrapper should never be instantiated. +// A "no call method defined on BoxedKernelWrapper" compile error means that +// an op signature has failed to trigger any of the partial specializations +// that follow this one. +// +// +// 2. Supported signatures, other than those involving non-const Tensor refs - +// i.e., "functional" ops. +// +// +// 3. in-place ops take a single non-const Tensor reference +// as their first argument, and return it. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// Because of this, the generated BoxedKernelWrapper specializations simply +// return the in-place argument. +// -// Parsed from ATen/ops/_sobol_engine_draw.h +// +// 3.5. In-process migration to make in-place ops take and return +// const references instead. -// #pragma once +// +// 4. out of place ops that take a single non-const Tensor reference as their +// final argument, and also return it. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// This assumption permits the generated BoxedKernelWrapper specializations to simply +// return out arguments. +// -// @generated by torchgen/gen.py from Function.h +// +// 5. out of place ops that take multiple non-const Tensor references as their +// final arguments, and return them in a std::tuple. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// This assumption permits the generated BoxedKernelWrapper specializations to simply +// return the out arguments. +// -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // impl + // c10 +// Parsed from ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h -// #include +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? 
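To make the supported shapes concrete, here are hypothetical C++ signatures (all names invented; only the shapes matter) that would select cases 2 through 5 above:

    at::Tensor functional_op(const at::Tensor& self);             // case 2: "functional" op
    at::Tensor& inplace_op(at::Tensor& self);                     // case 3: wrapper returns the in-place argument
    at::Tensor& op_out(const at::Tensor& self, at::Tensor& out);  // case 4: wrapper returns the trailing out argument
    std::tuple<at::Tensor&, at::Tensor&> op_outs(                 // case 5: wrapper returns the out arguments as a tuple
        const at::Tensor& self, at::Tensor& out0, at::Tensor& out1);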
dtype) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _sobol_engine_draw(@Const @ByRef Tensor quasi, @Cast("int64_t") long n, @Const @ByRef Tensor sobolstate, @Cast("int64_t") long dimension, @Cast("int64_t") long num_generated, @ByVal ScalarTypeOptional dtype); +// #include // TODO Instead of this, move torch::jit::Stack to the c10 namespace. +/* + * [Note: Argument forwarding in the dispatcher] + * + * The dispatcher uses a somewhat unusual way to forward arguments through several layers of + * wrapper functions. This can be confusing because an experienced C++ programmer would look at this + * and think "oh this is supposed to be forwarding a universal reference but the && is missing. This is a bug.". + * It is not a bug. The common way in C++ to forward arguments is to use universal references: + * + * > template<class T> void func(T&& arg) { func2(std::forward<T>(arg)); } + * + * but that relies on inferring the correct reference type (i.e. value vs & vs &&) from the argument. + * In our case, we cannot rely on the argument as supplied by the caller, because that could infer a + * different reference type than was used in the kernel function. The correct reference type + * is dictated by the kernel signature and must be identical since we cast function pointers + * through void* pointers and mismatches would be UB. So we need a forwarding pattern that determines + * the reference type to use by looking at the explicitly supplied operator signature, not by looking at + * the argument we're calling it with. + * + * What does std::forward<T>(t) do, exactly? + * ------------------------------------ + * std::forward<T>(t) is a way to cast t to the reference type supplied in T. + * Let's assume decay_t<T> == U and T is either U or some reference of U. + * - std::forward<T&>(t) will return U&, no matter what kind of reference t is. + * - std::forward<T&&>(t) will return U&&, no matter what kind of reference t is. + * - std::forward<T>(t) will return U&& (not U!), no matter what kind of reference t is. + * + * For universal references, that means that in the following function + * > template<class T> void func(T&& arg) { func2(std::forward<T>(arg)); } + * + * - when called with arg being an rvalue reference or non-reference value, T gets inferred to be + * a non-reference U, and std::forward<T>(t) will return U&&, correctly moving the argument. + * - when called with arg being an lvalue reference, T gets inferred to be U& because that's the only + * way to match the signature (in C++, a type that is (T&)&& will collapse to T&). + * That means std::forward<T>(t) will return U& and the value will not be moved but passed on as + * an lvalue reference.
+ * + * How do we use that? + * ------------------------------------ + * But std::forward can also be used outside of the common "universal forwarding" pattern to change + * reference types. So instead of following the common C++ pattern, we notice what + * std::forward<T>(t) actually does, and that is it takes a value and changes its reference to the + * type of reference passed in as T. If we don't infer T but explicitly specify it, we can use this + * to forward based on an explicitly specified reference type instead of the inferred argument type. + * + * This is why many of the dispatcher functions look like + * > template<class T> void func(T t) { func2(std::forward<T>(t)); } + * instead of the common + * > template<class T> void func(T&& t) { func2(std::forward<T>(t)); } + * + * and are expected to be called by explicitly specifying the template parameters in a way that matches + * the expected operator signature at each call site. + */ + // supported_primitive_arg_types defines which primitive types we allow in + // kernel functions as arguments or returns. + // Additionally, we support lists, dicts and optionals containing these types. + // We have an unboxed functor in hand that takes C++ arguments, and + // we're building a boxed functor wrapper for it that takes IValues. + // So "outside" is boxed and "inside" is unboxed. + // + // So a valid input type is one that our boxed functor wrapper can + // unbox from an IValue into a C++ value. + // + // Whereas a valid output type is one that our wrapper can receive + // as a C++ value from the unboxed functor, and box into an IValue. + // + // assert_is_valid_input_type + // checks that T can be unboxed from an IValue into a C++ value. + // -// Parsed from ATen/ops/_sobol_engine_ff.h + // The following specializations of assert_is_valid_input_type are technically not + // necessary since we would hit the base case and show an error message + // there if they didn't exist, but we can show a better error message + // in some common error scenarios. -// #pragma once + // + // assert_is_valid_output_type + // -// @generated by torchgen/gen.py from Function.h + // The following specializations of assert_is_valid_output_type are technically not + // necessary since we would hit the base case and show an error message + // there if they didn't exist, but we can show a better error message + // in some common error scenarios. -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // ivalue_to_arg + // The following two specializations take advantage of specialized + // `toTensor()` overloads on IValue to avoid copying. + // return_to_ivalue -// #include + // Special case to allow kernels to return `Tensor&`. + // TODO Delete this once kernels don't do that anymore + // wrap_kernel_functor_unboxed_ -// aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sobol_engine_ff_(@ByRef Tensor self, @Cast("int64_t") long n, @Const @ByRef Tensor sobolstate, @Cast("int64_t") long dimension, @Cast("int64_t") long num_generated); + // This specialization is for kernels with a first argument that is NOT of type DispatchKeySet + // This includes kernels with 0 arguments. + // This specialization is for kernels with a first argument of type DispatchKeySet + // call_functor_with_args_from_stack + // push_outputs -// Parsed from ATen/ops/_sobol_engine_initialize_state.h + // make_boxed_from_unboxed_functor + // namespace impl -// #pragma once + // namespace c10 -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from ATen/core/boxing/impl/WrapFunctionIntoFunctor.h +// #pragma once -// #include +// #include + + // WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor.
+ // Since it is a compile time function pointer, many compilers can inline it + // into the wrapper and you don't get any performance overhead for wrapping. -// aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sobol_engine_initialize_state_(@ByRef Tensor self, @Cast("int64_t") long dimension); -// Parsed from ATen/ops/_sobol_engine_scramble.h +// Parsed from ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - - -// #include +// #include + + // WrapFunctionIntoRuntimeFunctor: Wraps any runtime functor into a functor that + // inherits from c10::OperatorKernel, so it can be used as a c10 kernel. + // This can, for example, be used for lambdas, functors or even function pointers. + // In the case of function pointers, since it is a runtime function pointer, + // there is an overhead for calling it whenever the kernel is invoked. -// aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sobol_engine_scramble_(@ByRef Tensor self, @Const @ByRef Tensor ltm, @Cast("int64_t") long dimension); -// Parsed from ATen/ops/_softmax.h +// Parsed from ATen/core/boxing/KernelFunction.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - +// #include +// #include +// #include +// #include +// #include +// #include // TODO Instead of this, move torch::jit::Stack to the c10 namespace. +// Targeting ../KernelFunction.java -// #include -// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor -@Namespace("at") public static native @ByVal Tensor _softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float); -// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float); -// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _softmax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float, @ByRef Tensor out); +// #include +// Parsed from ATen/core/boxing/KernelFunction_impl.h +// #include +// #include +// #include +// #include -// Parsed from ATen/ops/_softmax_backward_data.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor _softmax_backward_data(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, ScalarType input_dtype); -// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _softmax_backward_data_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, ScalarType input_dtype); -// aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _softmax_backward_data_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, ScalarType input_dtype, @ByRef Tensor grad_input); -// Parsed from ATen/ops/_sparse_addmm.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// This template requires you to explicitly specify the argument you want to +// forward; it doesn't work if you try to deduce it +// NB: keep this in sync with cloneWithRealTypes in function_schema.cpp +@Namespace("c10") public static native long unpackSymInt(@ByVal SymInt x); +@Namespace("c10") public static native @ByVal LongArrayRef unpackSymInt(@ByVal SymIntArrayRef x); -// #include +@Namespace("c10") public static native @ByVal LongOptional unpackSymInt(@ByVal SymIntOptional x); -// aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_addmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor _sparse_addmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_addmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor _sparse_addmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_addmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// Parsed from ATen/ops/_sparse_broadcast_to.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor _sparse_broadcast_to(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _sparse_broadcast_to(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -// Parsed from ATen/ops/_sparse_broadcast_to_copy.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_broadcast_to_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _sparse_broadcast_to_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_broadcast_to_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor _sparse_broadcast_to_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_broadcast_to_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _sparse_broadcast_to_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// Parsed from ATen/ops/_sparse_bsc_tensor_unsafe.h +// Parsed from ATen/core/dispatch/CppSignature.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - +// #include +// #include +// #include +// #include +// #include +// Targeting ../CppSignature.java -// #include +@Namespace("c10::impl") public static native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef CppSignature lhs, @Const @ByRef CppSignature rhs); -// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor _sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// Parsed from ATen/ops/_sparse_bsr_tensor_unsafe.h +// Parsed from ATen/core/dispatch/RegistrationHandleRAII.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - +// #include +// Targeting ../RegistrationHandleRAII.java -// #include -// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor _sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// Parsed from ATen/core/ATenOpList.h +// #pragma once -// Parsed from ATen/ops/_sparse_compressed_tensor_unsafe.h +// #include -// #pragma once -// @generated by torchgen/gen.py from Function.h +// check if an op is a custom op (i.e. did not come from native_functions.yaml) +@Namespace("at") public static native @Cast("bool") boolean is_custom_op(@Const @ByRef OperatorName opName); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from ATen/core/op_registration/op_registration.h -// #include +// #pragma once +/** + * Include this file if you want to register operators. It includes all + * functionality needed to do so for you. + */ -// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor _sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD) +// #include +// #endif +// #include +// The first argument of the schema might be of type DispatchKeySet, in which case we remove it. +// We do this because every argument in a function schema is expected to be convertible +// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of.
+// See Note [Plumbing Keys Through The Dispatcher] +// Targeting ../RegisterOperators.java -// Parsed from ATen/ops/_sparse_coo_tensor_unsafe.h + // namespace c10 + // Old-style API -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from ATen/core/enum_tag.h +// #pragma once +// @generated by torchgen/gen.py from enum_tag.h + // Enum of valid tags obtained from the entries in tags.yaml + @Namespace("at") public enum Tag { + core(0), + data_dependent_output(1), + dynamic_output_shape(2), + generated(3), + inplace_view(4), + nondeterministic_bitwise(5), + nondeterministic_seeded(6), + pointwise(7), + view_copy(8); -// #include + public final int value; + private Tag(int v) { this.value = v; } + private Tag(Tag e) { this.value = e.value; } + public Tag intern() { for (Tag e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } + } -// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// Parsed from ATen/core/function.h -// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// #pragma once +// #include +// #include +// #include +// #include +// #include -// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_unsafe_symint(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_unsafe_symint(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal SymIntRef size); +@Namespace("at") public static native void launch(@ByVal Func func); -// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_unsafe_symint(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("torch::jit") public static native void preoptimizeGraph(@SharedPtr("torch::jit::Graph") @ByRef Graph graph, @Cast("bool") boolean disable_autocast/*=false*/); +@Namespace("torch::jit") public static native void preoptimizeGraph(@SharedPtr("torch::jit::Graph") @ByRef Graph graph); +// Targeting ../Function.java + // namespace jit + // namespace torch -// Parsed from ATen/ops/_sparse_coo_tensor_with_dims.h +// Parsed from ATen/core/class_type.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include // #include + // namespace jit + // namespace torch +// This enumerator represents the 'kind' of an attribute - a buffer, a parameter, or neither. +// This state is mutually exclusive. Buffers and Parameters can only appear on modules. +@Namespace("c10") public enum AttributeKind { + BUFFER(0), + PARAMETER(1), + REGULAR_ATTRIBUTE(2); + public final int value; + private AttributeKind(int v) { this.value = v; } + private AttributeKind(AttributeKind e) { this.value = e.value; } + public AttributeKind intern() { for (AttributeKind e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../ClassAttribute.java -// #include -// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -// aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +/** + * User Defined Types + */ +// Targeting ../ClassType.java -// aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_coo_tensor_with_dims_out(@ByRef Tensor out, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor _sparse_coo_tensor_with_dims_out(@ByRef Tensor out, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_coo_tensor_with_dims_outf(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _sparse_coo_tensor_with_dims_outf(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// Parsed from ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h -// #pragma once +// Parsed from torch/library.h -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +/// +/// +/// +/// +/// +/// +/// +/// +/// +// #pragma once +/** \file + * + * This header provides an API for extending PyTorch's core library + * of operators with user defined operators and data types. This + * API can be used in a few ways: + * + * * You can define new custom operators and classes with TORCH_LIBRARY(), + * making them available for use in both eager Python as well as in + * TorchScript. This API is modeled off of pybind11's {@code PYBIND11_MODULE} + * macro, as the provided functionality is similar (pybind11 lets you bind + * C++ to Python only; {@code torch/library.h} lets you bind C++ simultaneously to + * Python and TorchScript). + * + * * You can override existing operators with TORCH_LIBRARY_IMPL(), + * providing a new implementation for these operators for a custom + * backend (e.g., XLA). When you pass operators with tensors of your custom + * backend, your overridden implementations will be called instead + * of the standard implementations. + * + * * You can use both capabilities at the same time, allowing you + * to write custom operators that register CPU/CUDA/Autograd + * implementations without having to write the boilerplate + * conditionals yourself. 
+ * + * For a tutorial-style introduction to the library API, check + * out the [Extending TorchScript with Custom C++ + * Operators](https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html) + * tutorial. + * + *
 + *  <pre>{@code
+ *  // Define a library whose operators live in the namespace 'myops'.
+ *  // You must define all of the operators for this library in
+ *  // this namespace.
+ *  TORCH_LIBRARY(myops, m) {
+ *    // Define an operator with exactly one implementation for all backends.
+ *    m.def("add(Tensor self, Tensor other) -> Tensor", &add_impl);
+ * 
+ *    // Define a schema for an operator, but provide no implementation
+ *    // (use this syntax if you want to use the dispatcher)
+ *    m.def("mul(Tensor self, Tensor other) -> Tensor");
+ * 
+ *    // Provide an implementation for a defined operator (you can
+ *    // provide multiple; one per backend).  The dispatcher takes care of
+ *    // calling the correct implementation depending on whether we get a CPU
+ *    // tensor or a CUDA tensor.
+ *    m.impl("mul", torch::kCPU, &mul_cpu_impl);
+ *    m.impl("mul", torch::kCUDA, &mul_cuda_impl);
+ *  }
+ * 
+ *  // Define implementations for operators for a non-standard backend,
+ *  // e.g., XLA (valid values are entries of DispatchKey).  This can
+ *  // be used to define operators in a different file than the initial
+ *  // TORCH_LIBRARY definition (e.g., if it is in an external library)
+ *  TORCH_LIBRARY_IMPL(myops, XLA, m) {
+ *    m.impl("mul", &mul_xla_impl);
+ *  }
+ *  }</pre>
*/ +// #include +// #include +// #include +// #include -// #include +// Just for inferFunctionSchemaFromFunctor +// #include +// #include +// #if defined C10_MOBILE +/** + * The NoInferSchemaTag is a type name used to indicate that this call to the + * CppFunction constructor should not trigger schema inference from functor. + * Schema inference from functor utilizes template meta-programming, and is + * costly from a size perspective. Ideally, one would expect that the schema + * inference would require very little binary size since most of the + * computation can be done by the compiler at build time, but that isn't + * necessarily the case. + * + * Schema inference is elided only for mobile use-cases where we don't need + * the additional runtime cost or size overhead on client devices. + * + */ +// #endif -// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +// For multipy/torchdeploy use case +@Namespace("torch") public enum _RegisterOrVerify { + REGISTER(0), + VERIFY(1); + public final int value; + private _RegisterOrVerify(int v) { this.value = v; } + private _RegisterOrVerify(_RegisterOrVerify e) { this.value = e.value; } + public _RegisterOrVerify intern() { for (_RegisterOrVerify e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../CppFunction.java -// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal SymIntRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +/** \defgroup torch-dispatch-overloads torch::dispatch overloads +

+ * Create a torch::CppFunction which is associated with a specific + * dispatch key. torch::CppFunctions that are tagged with a + * c10::DispatchKey don't get invoked unless the dispatcher determines + * that this particular c10::DispatchKey is the one that should be + * dispatched to. + * + * This function is generally not used directly, instead, prefer using + * TORCH_LIBRARY_IMPL(), which will implicitly set the c10::DispatchKey + * for all registration calls inside of its body. + * + * \ingroup torch-dispatch-overloads */ +/** Convenience overload of dispatch() which accepts c10::DeviceType + * + * \ingroup torch-dispatch-overloads */ -// aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal SymIntRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +/** \defgroup torch-schema-overloads torch::schema overloads +

+ * Construct a c10::FunctionSchema from a string, with an explicitly + * specified c10::AliasAnalysisKind. Ordinarily, schemas are simply + * passed in as strings, but if you need to specify a custom alias + * analysis, you can replace the string with a call to this function. + * + *
 + *  <pre>{@code
+ *  // Default alias analysis (FROM_SCHEMA)
+ *  m.def("def3(Tensor self) -> Tensor");
+ *  // Pure function alias analysis
+ *  m.def(torch::schema("def3(Tensor self) -> Tensor",
+ *  c10::AliasAnalysisKind::PURE_FUNCTION));
+ *  }</pre>
+ * + * \ingroup torch-schema-overloads */ +/// +@Namespace("torch") public static native @ByVal FunctionSchema schema(@Cast("const char*") BytePointer str, AliasAnalysisKind k); +@Namespace("torch") public static native @ByVal FunctionSchema schema(String str, @Cast("c10::AliasAnalysisKind") byte k); -// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_coo_tensor_with_dims_and_tensors_out(@ByRef Tensor out, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values); -@Namespace("at") public static native @ByRef Tensor _sparse_coo_tensor_with_dims_and_tensors_out(@ByRef Tensor out, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values); +/** Function schemas can be directly constructed from string literals. + * + * \ingroup torch-schema-overloads */ +/// +/// +@Namespace("torch") public static native @ByVal FunctionSchema schema(@Cast("const char*") BytePointer s); +@Namespace("torch") public static native @ByVal FunctionSchema schema(String s); -// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_coo_tensor_with_dims_and_tensors_outf(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _sparse_coo_tensor_with_dims_and_tensors_outf(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByRef Tensor out); +/** \private + * + * Already constructed function schemas are accepted if they are + * rvalues. + * + * \ingroup torch-schema-overloads */ +@Namespace("torch") public static native @ByRef(true) FunctionSchema schema(@ByRef(true) FunctionSchema s); -// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_coo_tensor_with_dims_and_tensors_symint_out(@ByRef Tensor out, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal SymIntRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values); -// aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, SymInt[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!) 
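As a sketch of how these overloads combine in practice (the myops namespace and operator names are hypothetical), both forms can appear in the same registration block:

    TORCH_LIBRARY(myops, m) {
      // Plain string literal: default alias analysis (FROM_SCHEMA).
      m.def("my_op(Tensor self) -> Tensor");
      // Explicit alias analysis via the torch::schema() overload above.
      m.def(torch::schema("my_pure_op(Tensor self) -> Tensor",
                          c10::AliasAnalysisKind::PURE_FUNCTION));
    }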
-@Namespace("at") public static native @ByRef Tensor _sparse_coo_tensor_with_dims_and_tensors_symint_outf(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal SymIntRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByRef Tensor out); + // namespace detail +// Note [Selective build] +// ~~~~~~~~~~~~~~~~~~~~~~ +// In some settings, especially mobile, it is important to avoid compiling any +// references to functions that you aren't actually going to use, so that they +// can be eliminated by the linker. We call this capability "selective build". +// +// A very easy way to implement selective build which results in a lot of +// boilerplate is to just add ifdef's around every registration call, but this +// means you have to write a lot of extra lines of code at every registration +// site, and it also means you have to define some munging scheme to map +// operators to macros. +// +// Instead of doing this, we have a different mechanism centered around the +// concept of a SelectiveStr. A selective name is like a const char* string, +// except it also carries at compile time a boolean saying whether or not a +// registration should actually happen or not. We then have extra overloads +// which bypass registration entirely if a selective name is disabled. We do a +// constexpr test to see if a operator should be enabled or not; this is +// currently implemented in ATen/core/op_registration/op_allowlist.h +// dummy class for non selected custom torchbind classes +// Targeting ../DisabledStr.java -// Parsed from ATen/ops/_sparse_csc_tensor_unsafe.h +// Targeting ../EnabledStr.java -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #define TORCH_SELECTIVE_CLASS(n) +// torch::detail::SelectiveStr(n) +// #define TORCH_SELECTIVE_NAME(n) +// torch::detail::SelectiveStr(n) +// #define TORCH_SELECTIVE_SCHEMA(n) +// torch::detail::SelectiveStr(n) +// Targeting ../Library.java -// #include -// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -// aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor _sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + // namespace detail + // namespace torch +// NB: The EXACT NAMING of the initializer functions (e.g., +// TORCH_LIBRARY_init_aten) matters for the code analyzer; +// see the regexes at tools/code_analyzer/run_analyzer.sh +/** Macro for defining a function that will be run at static + * initialization time to define a library of operators in the + * namespace {@code ns} (must be a valid C++ identifier, no quotes). + * Use this macro when you want to define a new set of custom operators + * that do not already exist in PyTorch. + * + * Example usage: + * + *
{@code
+ *  TORCH_LIBRARY(myops, m) {
+ *    // m is a torch::Library; methods on it will define
+ *    // operators in the myops namespace
+ *    m.def("add", add_impl);
+ *  }
+ *  }
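 + * + *  A def() call may also take just a schema string, deferring the + *  implementation; a sketch (the hypothetical myops::mysigmoid operator + *  is not from the original sources): + * + *  
{@code
+ *  TORCH_LIBRARY(myops, m) {
+ *    // schema-only registration; implementations can be supplied
+ *    // later, per dispatch key, in TORCH_LIBRARY_IMPL blocks
+ *    m.def("mysigmoid(Tensor self) -> Tensor");
+ *  }
+ *  }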
 + * + * The {@code m} argument is bound to a torch::Library that is used to + * register operators. There may only be one TORCH_LIBRARY() + * for any given namespace. */ -// Parsed from ATen/ops/_sparse_csr_prod.h +/// +// #define TORCH_LIBRARY(ns, m) +// static void TORCH_LIBRARY_init_##ns(torch::Library&); +// static const torch::detail::TorchLibraryInit TORCH_LIBRARY_static_init_##ns( +// torch::Library::DEF, +// &TORCH_LIBRARY_init_##ns, +// #ns, +// c10::nullopt, +// __FILE__, +// __LINE__); +// void TORCH_LIBRARY_init_##ns(torch::Library& m) + +/** \private + * + * This macro is a version of TORCH_LIBRARY() that doesn't enforce that there + * is only one library (it is a "fragment"). This is used inside the + * PerOpRegistration.cpp file, as well as in places where all op registrations + * within the same namespace cannot be easily put into one macro block + * (this is mostly the case for custom ops in fbcode that were ported from + * the old API) */ -// #pragma once +/// +// #define TORCH_LIBRARY_FRAGMENT(ns, m) _TORCH_LIBRARY_FRAGMENT(ns, m, C10_UID) -// @generated by torchgen/gen.py from Function.h +/** \private + * + * The above macro requires an extra unique identifier (uid) to prevent + * variable name collisions. This can happen if TORCH_LIBRARY_FRAGMENT is called + * multiple times with the same namespace in the same translation unit. Note + * that the TORCH_LIBRARY variant doesn't run into this problem, because it + * enforces that it can only be called once for a given namespace. */ -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +/// +/// +/// +/// +/// +/// +// #define _TORCH_LIBRARY_FRAGMENT(ns, m, uid) +// static void C10_CONCATENATE( +// TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(torch::Library&); +// static const torch::detail::TorchLibraryInit C10_CONCATENATE( +// TORCH_LIBRARY_FRAGMENT_static_init_##ns##_, uid)( +// torch::Library::FRAGMENT, +// &C10_CONCATENATE(TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid), +// #ns, +// c10::nullopt, +// __FILE__, +// __LINE__); +// void C10_CONCATENATE( +// TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid)(torch::Library & m) + +/** Macro for defining a function that will be run at static + * initialization time to define operator overrides for dispatch key + * {@code k} (must be an unqualified enum member of c10::DispatchKey) in + * namespace {@code ns} (must be a valid C++ identifier, no quotes). Use this + * macro when you want to implement a preexisting set of custom + * operators on a new dispatch key (e.g., you want to provide CUDA + * implementations of already existing operators). One common usage + * pattern is to use TORCH_LIBRARY() to define schema for all new + * operators you want to define, and then use several + * TORCH_LIBRARY_IMPL() blocks to provide implementations of the + * operator for CPU, CUDA and Autograd. + * + * In some cases, you need to define something that applies to all namespaces, + * not just one namespace (usually a fallback). In that case, use the reserved + * namespace _, e.g., + * + *
{@code
+ *  TORCH_LIBRARY_IMPL(_, XLA, m) {
+ *     m.fallback(xla_fallback);
+ *  }
+ *  }
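 + * + *  In such a block, xla_fallback would typically be a boxed function; a + *  hedged sketch of the usual shape (the names are illustrative, not from + *  the original header): + * + *  
{@code
+ *  void xla_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack);
+ *  // registered as:
+ *  // m.fallback(torch::CppFunction::makeFromBoxedFunction<&xla_fallback>());
+ *  }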
+ * + * Example usage: + * + *
{@code
+ *  TORCH_LIBRARY_IMPL(myops, CPU, m) {
+ *    // m is a torch::Library; methods on it will define
+ *    // CPU implementations of operators in the myops namespace.
+ *    // It is NOT valid to call torch::Library::def()
+ *    // in this context.
+ *    m.impl("add", add_cpu_impl);
+ *  }
+ *  }
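 + * + *  A block for another dispatch key follows the same pattern; a sketch, + *  where add_cuda_impl is a hypothetical CUDA implementation: + * + *  
{@code
+ *  TORCH_LIBRARY_IMPL(myops, CUDA, m) {
+ *    m.impl("add", add_cuda_impl);
+ *  }
+ *  }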
 + * + * If {@code add_cpu_impl} is an overloaded function, use a + * {@code static_cast} to specify which overload you want + * (by providing the full type). + * */ +// NB: if the dispatch key is not whitelisted, we simply omit the Library +// call entirely +/// +// #define TORCH_LIBRARY_IMPL(ns, k, m) _TORCH_LIBRARY_IMPL(ns, k, m, C10_UID) +/** \private + * + * The above macro requires an extra unique identifier (uid) to prevent + * variable name collisions. This can happen if TORCH_LIBRARY_IMPL is called + * multiple times with the same namespace and dispatch key in the same + * translation unit. */ +// #define _TORCH_LIBRARY_IMPL(ns, k, m, uid) +// static void C10_CONCATENATE( +// TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(torch::Library&); +// static const torch::detail::TorchLibraryInit C10_CONCATENATE( +// TORCH_LIBRARY_IMPL_static_init_##ns##_##k##_, uid)( +// torch::Library::IMPL, +// c10::guts::if_constexpr( +// []() { +// return &C10_CONCATENATE( +// TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid); +// }, +// []() { return [](torch::Library&) -> void {}; }), +// #ns, +// c10::make_optional(c10::DispatchKey::k), +// __FILE__, +// __LINE__); +// void C10_CONCATENATE( +// TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid)(torch::Library & m) + +// These are variants of the macros above which are to be used for testing (they +// don't set up the static initializer, so you can control the visibility of +// the allocated library yourself). +// +// DO NOT use these in production code, they are NOT understood by the +// code analyzer and will be incorrectly analyzed in those situations. + +/** \private */ +// #define MAKE_TORCH_LIBRARY(ns) +// torch::Library(torch::Library::DEF, #ns, c10::nullopt, __FILE__, __LINE__) +/** \private */ +// #define MAKE_TORCH_LIBRARY_IMPL(ns, k) +// torch::Library( +// torch::Library::IMPL, +// #ns, +// c10::make_optional(c10::DispatchKey::k), +// __FILE__, +// __LINE__) + +// Make the custom class API visible, so it is available from +// torch::Library. -// #include +// #include -// aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_csr_prod(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor _sparse_csr_prod(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor _sparse_csr_prod(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor _sparse_csr_prod(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// Parsed from torch/csrc/autograd/autograd_not_implemented_fallback.h -// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _sparse_csr_prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor _sparse_csr_prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor _sparse_csr_prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor _sparse_csr_prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_csr_prod_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _sparse_csr_prod_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// #pragma once +// #include +@Namespace("torch::autograd") public static native @ByVal CppFunction autogradNotImplementedFallback(); +@Namespace("torch::autograd") public static native @ByVal CppFunction autogradNotImplementedInplaceOrViewFallback(); -// Parsed from ATen/ops/_sparse_csr_sum.h + // namespace autograd + // namespace torch -// #pragma once -// @generated by torchgen/gen.py from Function.h +// Parsed from torch/csrc/autograd/anomaly_mode.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #pragma once +// #include +// #include +// #include +// Targeting ../AnomalyMode.java -// #include +// Targeting ../DetectAnomalyGuard.java -// aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_csr_sum(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor _sparse_csr_sum(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor _sparse_csr_sum(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor _sparse_csr_sum(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// Targeting ../AnomalyMetadata.java -// aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_csr_sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor _sparse_csr_sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor _sparse_csr_sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor _sparse_csr_sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_csr_sum_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _sparse_csr_sum_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); + // namespace autograd + // namespace torch -// Parsed from ATen/ops/_sparse_csr_tensor_unsafe.h +// Parsed from ATen/core/grad_mode.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - +// #include +// #include -// #include +// Parsed from torch/csrc/autograd/grad_mode.h -// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor _sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor _sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// #pragma once +// #include +// #include + // namespace autograd + // namespace torch -// Parsed from ATen/ops/_sparse_log_softmax.h +// Parsed from ATen/FuncTorchTLS.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// #include +// #include +// Targeting ../FuncTorchTLSBase.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// returns deepcopy of the functorch tls +@Namespace("at::functorch") public static native @UniquePtr FuncTorchTLSBase getCopyOfFuncTorchTLS(); -// #include +// sets the functorch tls. always does a deep copy. +@Namespace("at::functorch") public static native void setFuncTorchTLS( + @Const @SharedPtr("const at::functorch::FuncTorchTLSBase") @ByRef FuncTorchTLSBase state); +// get a mutable reference to the functorch tls +@Namespace("at::functorch") public static native @UniquePtr FuncTorchTLSBase functorchTLSAccessor(); -// aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor _sparse_log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); + // namespace functorch + // namespace at -// aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_log_softmax(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor _sparse_log_softmax(@Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float); +// Parsed from c10/core/SafePyObject.h -// aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_log_softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float); -// aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _sparse_log_softmax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float, @ByRef Tensor out); +// #pragma once +// #include +// #include +// #include +// Targeting ../SafePyObject.java +// Targeting ../SafePyHandle.java -// Parsed from ATen/ops/_sparse_log_softmax_backward_data.h -// #pragma once -// @generated by torchgen/gen.py from Function.h + // namespace c10 -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from ATen/PythonTorchFunctionTLS.h +// #pragma once -// #include +// #include +// #include +@Namespace("at::impl") public enum TorchFunctionDisabledState { ENABLED(0), SUBCLASSES_DISABLED(1), ALL_DISABLED(2); -// aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_log_softmax_backward_data(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, @Const @ByRef Tensor self); + public final int value; + private TorchFunctionDisabledState(int v) { this.value = v; } + private TorchFunctionDisabledState(TorchFunctionDisabledState e) { this.value = e.value; } + public TorchFunctionDisabledState intern() { for (TorchFunctionDisabledState e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../PythonTorchFunctionTLS.java -// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_log_softmax_backward_data_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, @Const @ByRef Tensor self); -// aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _sparse_log_softmax_backward_data_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, @Const @ByRef Tensor self, @ByRef Tensor out); +@Namespace("at::impl") public static native @Cast("bool") boolean torch_function_mode_enabled(); + // namespace impl + // namespace at -// Parsed from ATen/ops/_sparse_mm.h -// #pragma once +// Parsed from ATen/SavedTensorHooks.h -// @generated by torchgen/gen.py from Function.h +// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include // #include +// #include +// #include +// #include +// #include +// Targeting ../SavedTensorDefaultHooksTLS.java -// #include -// aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_mm(@Const @ByRef Tensor sparse, @Const @ByRef Tensor dense); +// Targeting ../SavedTensorDefaultHooks.java -// aten::_sparse_mm.reduce(Tensor sparse, Tensor dense, str reduce) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_mm(@Const @ByRef Tensor sparse, @Const @ByRef Tensor dense, @ByVal @Cast("c10::string_view*") Pointer reduce); + // namespace at -// Parsed from ATen/ops/_sparse_mm_reduce_impl.h +// Parsed from ATen/ThreadLocalPythonObjects.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// #include +// #include +// #include +// Targeting ../ThreadLocalPythonObjects.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace impl + // namespace at -// #include +// Parsed from c10/core/impl/PythonDispatcherTLS.h -// aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _sparse_mm_reduce_impl(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::string_view*") Pointer reduce); +// #pragma once +// #include +// #include +// #include +// Targeting ../PythonDispatcherTLS.java +// Targeting ../DisablePythonDispatcher.java -// Parsed from ATen/ops/_sparse_mm_reduce_impl_backward.h -// #pragma once -// @generated by torchgen/gen.py from Function.h + // namespace impl + // namespace c10 -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from c10/core/impl/TorchDispatchModeTLS.h +// #pragma once -// #include +// #include +// #include +// Targeting ../TorchDispatchModeTLS.java -// aten::_sparse_mm_reduce_impl_backward(Tensor self, Tensor grad_out, Tensor weight, str reduce, Tensor arg_out, bool[2] output_mask) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _sparse_mm_reduce_impl_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor weight, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef Tensor arg_out, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("c10::impl") public static native @Cast("bool") boolean dispatch_mode_enabled(); + // namespace impl + // namespace c10 -// Parsed from ATen/ops/_sparse_softmax.h +// Parsed from ATen/ThreadLocalState.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// #include -// #include -// #include -// #include -// #include -// #include -// 
#include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../ThreadLocalState.java -// #include +// Targeting ../ThreadLocalStateGuard.java + -// aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor _sparse_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); + // namespace at -// aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_softmax(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor _sparse_softmax(@Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float); +// Parsed from c10/util/ThreadLocal.h -// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float); -// aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_softmax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean half_to_float, @ByRef Tensor out); +// #pragma once +// #include +/** + * Android versions with libgnustl incorrectly handle thread_local C++ + * qualifier with composite types. NDK up to r17 version is affected. + * + * (A fix landed on Jun 4 2018: + * https://android-review.googlesource.com/c/toolchain/gcc/+/683601) + * + * In such cases, use c10::ThreadLocal wrapper + * which is {@code pthread_*} based with smart pointer semantics. + * + * In addition, convenient macro C10_DEFINE_TLS_static is available. + * To define static TLS variable of type std::string, do the following + *
{@code
+ *  C10_DEFINE_TLS_static(std::string, str_tls_);
+ *  ///////
+ *  {
+ *    *str_tls_ = "abc";
 + *    assert(str_tls_->length() == 3); 
+ *  }
+ * }
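 + * + * For a class-scoped TLS member, the declare/define macro pair below can + * be used; a sketch (Foo is a hypothetical class): + * + *
{@code
+ *  struct Foo {
+ *    C10_DECLARE_TLS_class_static(Foo, std::string, str_tls_);
+ *  };
+ *  C10_DEFINE_TLS_class_static(Foo, std::string, str_tls_);
+ * }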
+ * + * (see c10/test/util/ThreadLocal_test.cpp for more examples) + */ +// #if !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE) +// #if defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604 +// #define C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE +// #endif // defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604 -// Parsed from ATen/ops/_sparse_softmax_backward_data.h +// #endif // !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE) -// #pragma once +// #if defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE) +// #include +// #include +// #include +// #include -// @generated by torchgen/gen.py from Function.h +/** + * \brief Temporary thread_local C++ qualifier replacement for Android + * based on {@code pthread_*}. + * To be used with composite types that provide default ctor. + */ -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace c10 +// #define C10_DEFINE_TLS_static(Type, Name) static ::c10::ThreadLocal Name +// #define C10_DECLARE_TLS_class_static(Class, Type, Name) +// static ::c10::ThreadLocal Name -// #include +// #define C10_DEFINE_TLS_class_static(Class, Type, Name) +// ::c10::ThreadLocal Class::Name +// #else // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE) -// aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_softmax_backward_data(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, @Const @ByRef Tensor self); +/** + * \brief Default thread_local implementation for non-Android cases. + * To be used with composite types that provide default ctor. + */ -// aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_softmax_backward_data_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, @Const @ByRef Tensor self); -// aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_softmax_backward_data_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Cast("int64_t") long dim, @Const @ByRef Tensor self, @ByRef Tensor out); + // namespace c10 +// #define C10_DEFINE_TLS_static(Type, Name) +// static ::c10::ThreadLocal Name([]() { +// static thread_local Type var; +// return &var; +// }) +// #define C10_DECLARE_TLS_class_static(Class, Type, Name) +// static ::c10::ThreadLocal Name +// #define C10_DEFINE_TLS_class_static(Class, Type, Name) +// ::c10::ThreadLocal Class::Name([]() { +// static thread_local Type var; +// return &var; +// }) -// Parsed from ATen/ops/_sparse_sparse_matmul.h +// #endif // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE) -// #pragma once -// @generated by torchgen/gen.py from Function.h +// Parsed from torch/csrc/autograd/input_buffer.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #pragma once +// The InputBuffer class accumulates a list of Variables for use by a +// function. It implements logic to avoid modifying the passed +// values in-place (adding an input twice will accumulate the result). 
+// This behaviour is needed and used only in backward graphs. +// #include +// #include +// #include -// #include +// #include +// #include +// #include + // namespace autograd + // namespace torch -// aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_sparse_matmul(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_sparse_matmul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_sparse_matmul_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// Parsed from torch/csrc/autograd/utils/warnings.h +// #pragma once +// #include +// #include +// #include +// Warning handler for multi-threaded contexts. Gather warnings from +// all threads into a single queue, then process together at the end +// in the main thread. -// Parsed from ATen/ops/_sparse_sum.h + // namespace utils + // namespace autograd + // namespace torch -// #pragma once -// @generated by torchgen/gen.py from Function.h +// Parsed from torch/csrc/autograd/graph_task.h -// #include -// #include -// #include -// #include -// #include -// #include +// #pragma once +// #include // #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include +// #include +// Targeting ../ReadyQueue.java -// #include +@Namespace("torch::autograd") @MemberGetter public static native int NO_DEVICE(); +public static final int NO_DEVICE = NO_DEVICE(); +@Namespace("torch::autograd") @MemberGetter public static native int CPU_DEVICE(); +public static final int CPU_DEVICE = CPU_DEVICE(); -// aten::_sparse_sum(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_sum(@Const @ByRef Tensor self); -// aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_sum(@Const @ByRef Tensor self, ScalarType dtype); +// GraphTask holds metadata needed for a single execution of backward() -// aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_sum(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor _sparse_sum(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// The guard that sets and restores current_graph_task. -// aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_sum(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, ScalarType dtype); -@Namespace("at") public static native @ByVal Tensor _sparse_sum(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, ScalarType dtype); -// aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _sparse_sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor _sparse_sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_sum_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _sparse_sum_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByRef Tensor out); +@Namespace("torch::autograd") public static native @Const NodeSet get_current_graph_task_nodes_in_graph(); +@Namespace("torch::autograd") public static native @Cast("bool") boolean get_current_graph_task_keep_graph(); +@Namespace("torch::autograd") public static native @Cast("torch::autograd::Node**") @StdVector PointerPointer get_current_graph_task_execution_order(); +@Namespace("torch::autograd") public static native int get_current_graph_task_id(); + // namespace autograd + // namespace torch -// Parsed from ATen/ops/_sparse_sum_backward.h +// Parsed from ATen/core/MT19937RNGEngine.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// define constants like M_PI and C keywords for MSVC +// #ifdef _MSC_VER +// #ifndef _USE_MATH_DEFINES +// #define _USE_MATH_DEFINES +// #endif +// #include +// #endif +// #include +// #include +// #include -// #include +@Namespace("at") @MemberGetter public static native int MERSENNE_STATE_N(); +@Namespace("at") @MemberGetter public static native int MERSENNE_STATE_M(); +@Namespace("at") @MemberGetter public static native @Cast("const uint32_t") int MATRIX_A(); +@Namespace("at") @MemberGetter public static native @Cast("const uint32_t") int UMASK(); +@Namespace("at") @MemberGetter public static native @Cast("const uint32_t") int LMASK(); +// Targeting ../mt19937_data_pod.java -// aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor _sparse_sum_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor _sparse_sum_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// Targeting ../mt19937_engine.java -// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _sparse_sum_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor _sparse_sum_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _sparse_sum_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _sparse_sum_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByRef Tensor out); + // namespace at -// Parsed from ATen/ops/_spdiags.h +// Parsed from ATen/CPUGeneratorImpl.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include // #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include // #include +// Targeting ../CPUGeneratorImpl.java -// #include - - -// aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _spdiags(@Const @ByRef Tensor diagonals, @Const @ByRef Tensor offsets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout); -@Namespace("at") public static native @ByVal Tensor _spdiags(@Const @ByRef Tensor diagonals, @Const @ByRef Tensor offsets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape); -@Namespace("at") public static native @ByVal Tensor _spdiags(@Const @ByRef Tensor diagonals, @Const @ByRef Tensor offsets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout); -@Namespace("at") public static native @ByVal Tensor _spdiags(@Const @ByRef Tensor diagonals, @Const @ByRef Tensor offsets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape); - -// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _spdiags_out(@ByRef Tensor out, @Const @ByRef Tensor diagonals, @Const @ByRef Tensor offsets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout); -@Namespace("at") public static native @ByRef Tensor _spdiags_out(@ByRef Tensor out, @Const @ByRef Tensor diagonals, @Const @ByRef Tensor offsets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape); -@Namespace("at") public static native @ByRef Tensor _spdiags_out(@ByRef Tensor out, @Const @ByRef Tensor diagonals, @Const @ByRef Tensor offsets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout); -@Namespace("at") public static native @ByRef Tensor _spdiags_out(@ByRef Tensor out, @Const @ByRef Tensor diagonals, @Const @ByRef Tensor offsets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape); -// aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _spdiags_outf(@Const @ByRef Tensor diagonals, @Const @ByRef Tensor offsets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape, @ByVal LayoutOptional layout, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _spdiags_outf(@Const @ByRef Tensor diagonals, @Const @ByRef Tensor offsets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, @ByVal LayoutOptional layout, @ByRef Tensor out); +@Namespace("at::detail") public static native @Const @ByRef Generator getDefaultCPUGenerator(); +@Namespace("at::detail") public static native @ByVal Generator createCPUGenerator(@Cast("uint64_t") long seed_val/*=c10::default_rng_seed_val*/); +@Namespace("at::detail") public static native @ByVal Generator createCPUGenerator(); + // namespace detail + // namespace at -// Parsed from ATen/ops/_stack.h +// Parsed from ATen/LinalgBackend.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include + +@Namespace("at") public enum LinalgBackend { Default((byte)(0)), Cusolver((byte)(1)), Magma((byte)(2)); + public final byte value; + private LinalgBackend(byte v) { this.value = v; } + private LinalgBackend(LinalgBackend e) { this.value = e.value; } + public LinalgBackend intern() { for (LinalgBackend e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +@Namespace("at") public static native @StdString BytePointer LinalgBackendToString(LinalgBackend backend); +@Namespace("at") public static native @StdString String LinalgBackendToString(@Cast("at::LinalgBackend") byte backend); -// #include +@Namespace("at") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + LinalgBackend backend); + // namespace at -// aten::_stack(Tensor[] tensors, int dim=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor _stack(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal Tensor _stack(@ByVal TensorArrayRef tensors); -// aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _stack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByRef Tensor _stack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _stack_outf(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); +// Parsed from ATen/core/ATenGeneral.h +// #pragma once +// #include -// Parsed from ATen/ops/_standard_gamma.h +// Parsed from ATen/core/LegacyTypeDispatch.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// The legacy mechanism for dispatching operators in ATen is a Type +// object, which is essentially a giant virtual dispatch table +// for every operation we support dynamically dispatching over. +// +// This has been deprecated in favor of ATenDispatch, and in the future, +// c10 dispatcher. 
+// TODO: Clean up what remains here -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// Targeting ../AutoDispatchBelowAutograd.java +// Targeting ../AutoNonVariableTypeMode.java -// #include +// Targeting ../AutoDispatchSkipFunctionalize.java -// aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _standard_gamma(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor _standard_gamma(@Const @ByRef Tensor self); -// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _standard_gamma_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor _standard_gamma_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _standard_gamma_outf(@Const @ByRef Tensor self, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// Targeting ../AutoDispatchBelowADInplaceOrView.java + // namespace at -// Parsed from ATen/ops/_standard_gamma_grad.h +// Parsed from ATen/detail/CUDAHooksInterface.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include +// #include // #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include // #include +// #include + +// #include +// #include +// #include +// Forward-declares at::cuda::NVRTC + // at::cuda -// #include +// NB: Class must live in `at` due to limitations of Registry.h. +// #ifdef _MSC_VER +@Namespace("at") @MemberGetter public static native @Cast("const char*") BytePointer CUDA_HELP(); +// #else +// Targeting ../CUDAHooksInterface.java -// aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor -@Namespace("at") public static native @ByVal Tensor _standard_gamma_grad(@Const @ByRef Tensor self, @Const @ByRef Tensor output); -// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _standard_gamma_grad_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor output); -// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _standard_gamma_grad_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor output, @ByRef Tensor out); +// Targeting ../CUDAHooksArgs.java +// #define REGISTER_CUDA_HOOKS(clsname) +// C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname) +@Namespace("at::detail") public static native @Const @ByRef CUDAHooksInterface getCUDAHooks(); + // namespace detail + // namespace at -// Parsed from ATen/ops/_test_ambiguous_defaults.h +// Parsed from ATen/detail/HIPHooksInterface.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include +// #include // #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include +// #include +// #include -// #include +// NB: Class must live in `at` due to limitations of Registry.h. +// Targeting ../HIPHooksInterface.java -// aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor +// Targeting ../HIPHooksArgs.java -// aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor +// #define REGISTER_HIP_HOOKS(clsname) +// C10_REGISTER_CLASS(HIPHooksRegistry, clsname, clsname) +@Namespace("at::detail") public static native @Const @ByRef HIPHooksInterface getHIPHooks(); + // namespace detail + // namespace at +// Parsed from ATen/detail/MPSHooksInterface.h -// Parsed from ATen/ops/_test_autograd_multiple_dispatch.h +// Copyright © 2022 Apple Inc. // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include +// #include // #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - +// #include +// #include -// #include +// #include +// #include +// Targeting ../MPSHooksInterface.java -// aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _test_autograd_multiple_dispatch(@Const @ByRef Tensor self); -// aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor -@Namespace("at") public static native @ByVal Tensor _test_autograd_multiple_dispatch(@Const @ByRef Tensor self, @Cast("bool") boolean b); +// Targeting ../MPSHooksArgs.java -// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _test_autograd_multiple_dispatch_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _test_autograd_multiple_dispatch_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// #define REGISTER_MPS_HOOKS(clsname) +// C10_REGISTER_CLASS(MPSHooksRegistry, clsname, clsname) +@Namespace("at::detail") public static native @Const @ByRef MPSHooksInterface getMPSHooks(); + // namespace detail + // namespace at -// Parsed from ATen/ops/_test_autograd_multiple_dispatch_view.h +// Parsed from ATen/detail/ORTHooksInterface.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// #include +// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@MemberGetter public static native @Cast("const char*") BytePointer ORT_HELP(); +// Targeting ../ORTHooksInterface.java +// Targeting ../ORTHooksArgs.java -// #include +// #define REGISTER_ORT_HOOKS(clsname) +// C10_REGISTER_CLASS(ORTHooksRegistry, clsname, clsname) +@Namespace("at::detail") public static native @Const @ByRef ORTHooksInterface getORTHooks(); + // namespace detail -// aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor _test_autograd_multiple_dispatch_view(@Const @ByRef Tensor self); + // namespace at +// Parsed from c10/core/QEngine.h +// #pragma once -// Parsed from ATen/ops/_test_autograd_multiple_dispatch_view_copy.h +// #include +// #include +// #include -// #pragma once +/** + * QEngine is an enum that is used to select the engine to run quantized ops. + * Keep this enum in sync with get_qengine_id() in + * torch/backends/quantized/__init__.py + */ +@Namespace("c10") public enum QEngine { + NoQEngine((byte)(0)), + FBGEMM((byte)(1)), + QNNPACK((byte)(2)), + ONEDNN((byte)(3)), + X86((byte)(4)); -// @generated by torchgen/gen.py from Function.h + public final byte value; + private QEngine(byte v) { this.value = v; } + private QEngine(QEngine e) { this.value = e.value; } + public QEngine intern() { for (QEngine e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("c10") public static native @StdString BytePointer toString(QEngine qengine); + // namespace c10 -// #include +// Parsed from c10/util/CallOnce.h +// #pragma once -// aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _test_autograd_multiple_dispatch_view_copy(@Const @ByRef Tensor self); +// #include +// #include +// #include +// #include -// aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _test_autograd_multiple_dispatch_view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _test_autograd_multiple_dispatch_view_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// #include +// #include +// custom c10 call_once implementation to avoid the deadlock in std::call_once. +// The implementation here is a simplified version from folly and likely much +// much higher memory footprint. 
+ // namespace c10 -// Parsed from ATen/ops/_test_check_tensor.h +// Parsed from c10/util/env.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include // #include +// #include +// Reads an environment variable and returns +// - optional, if set equal to "1" +// - optional, if set equal to "0" +// - nullopt, otherwise +// +// NB: +// Issues a warning if the value of the environment variable is not 0 or 1. +@Namespace("c10::utils") public static native @ByVal BoolOptional check_env(@Cast("const char*") BytePointer name); +@Namespace("c10::utils") public static native @ByVal BoolOptional check_env(String name); + // namespace utils + // namespace c10 +// Parsed from ATen/Context.h -// #include - - -// aten::_test_check_tensor(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _test_check_tensor(@Const @ByRef Tensor self); +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +@Namespace("at") public enum Float32MatmulPrecision { HIGHEST(0), HIGH(1), MEDIUM(2); -// Parsed from ATen/ops/_test_optional_filled_intlist.h + public final int value; + private Float32MatmulPrecision(int v) { this.value = v; } + private Float32MatmulPrecision(Float32MatmulPrecision e) { this.value = e.value; } + public Float32MatmulPrecision intern() { for (Float32MatmulPrecision e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../Context.java -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("at") public static native @ByRef Context globalContext(); +@Namespace("at") public static native void init(); +@Namespace("at") public static native Allocator getCPUAllocator(); -// #include +@Namespace("at") public static native @Cast("bool") boolean hasCUDA(); +@Namespace("at") public static native @Cast("bool") boolean hasHIP(); -// aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor +@Namespace("at") public static native @Cast("bool") boolean hasIPU(); +@Namespace("at") public static native @Cast("bool") boolean hasXLA(); -// aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _test_optional_filled_intlist_out(@ByRef Tensor out, @Const @ByRef Tensor values, @ByVal LongArrayRefOptional addends); -@Namespace("at") public static native @ByRef Tensor _test_optional_filled_intlist_out(@ByRef Tensor out, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... addends); -// aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _test_optional_filled_intlist_outf(@Const @ByRef Tensor values, @ByVal LongArrayRefOptional addends, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _test_optional_filled_intlist_outf(@Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] addends, @ByRef Tensor out); +@Namespace("at") public static native @Cast("bool") boolean hasMPS(); +@Namespace("at") public static native @Cast("bool") boolean hasORT(); +// Despite its name, this function returns the number of *CUDA* GPUs. +@Namespace("at") public static native @Cast("size_t") long getNumGPUs(); +@Namespace("at") public static native @Cast("bool") boolean hasOpenMP(); -// Parsed from ATen/ops/_test_optional_floatlist.h +@Namespace("at") public static native @Cast("bool") boolean hasMKL(); -// #pragma once +@Namespace("at") public static native @Cast("bool") boolean hasLAPACK(); -// @generated by torchgen/gen.py from Function.h +@Namespace("at") public static native @Cast("bool") boolean hasMAGMA(); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("at") public static native @Cast("bool") boolean hasMKLDNN(); +@Namespace("at") public static native void manual_seed(@Cast("uint64_t") long seed); +// Targeting ../NoTF32Guard.java -// #include +// #ifdef USE_ROCM +// #endif -// aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor + // namespace at -// aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _test_optional_floatlist_out(@ByRef Tensor out, @Const @ByRef Tensor values, @ByVal DoubleArrayRefOptional addends); -// aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _test_optional_floatlist_outf(@Const @ByRef Tensor values, @ByVal DoubleArrayRefOptional addends, @ByRef Tensor out); +// Parsed from ATen/DeviceGuard.h +// #pragma once +// #include +// #include +// #include +// #include // TensorList whyyyyy +// Are you here because you're wondering why DeviceGuard(tensor) no +// longer works? For code organization reasons, we have temporarily(?) +// removed this constructor from DeviceGuard. The new way to +// spell it is: +// +// OptionalDeviceGuard guard(device_of(tensor)); -// Parsed from ATen/ops/_test_optional_intlist.h +/** Return the Device of a Tensor, if the Tensor is defined. */ +@Namespace("at") public static native @ByVal DeviceOptional device_of(@Const @ByRef Tensor t); -// #pragma once +@Namespace("at") public static native @ByVal DeviceOptional device_of(@Const @ByRef TensorOptional t); -// @generated by torchgen/gen.py from Function.h +/** Return the Device of a TensorList, if the list is non-empty and + * the first Tensor is defined. (This function implicitly assumes + * that all tensors in the list have the same device.) */ +@Namespace("at") public static native @ByVal DeviceOptional device_of(@ByVal TensorArrayRef t); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace at +// Parsed from ATen/DimVector.h -// #include +// #pragma once +// #include -// aten::_test_optional_intlist(Tensor values, int[]? 
addends) -> Tensor +// Parsed from ATen/EmptyTensor.h +// #pragma once +// #include -// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _test_optional_intlist_out(@ByRef Tensor out, @Const @ByRef Tensor values, @ByVal LongArrayRefOptional addends); -@Namespace("at") public static native @ByRef Tensor _test_optional_intlist_out(@ByRef Tensor out, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... addends); -// aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _test_optional_intlist_outf(@Const @ByRef Tensor values, @ByVal LongArrayRefOptional addends, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _test_optional_intlist_outf(@Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] addends, @ByRef Tensor out); +@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( + @ByVal LongArrayRef sizes, + @Cast("size_t") long itemsize, + @Cast("size_t") long storage_offset/*=0*/); +@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( + @ByVal LongArrayRef sizes, + @Cast("size_t") long itemsize); +@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @Cast("size_t") long itemsize, + @Cast("size_t") long storage_offset/*=0*/); +@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytesContiguous( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @Cast("size_t") long itemsize); +@Namespace("at::detail") public static native @ByVal SymInt computeStorageNbytesContiguous( + @ByVal SymIntArrayRef sizes, + @Const @ByRef SymInt itemsize, + @Const @ByRef(nullValue = "c10::SymInt(0)") SymInt storage_offset); +@Namespace("at::detail") public static native @ByVal SymInt computeStorageNbytesContiguous( + @ByVal SymIntArrayRef sizes, + @Const @ByRef SymInt itemsize); +@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytes( + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("size_t") long itemsize, + @Cast("size_t") long storage_offset/*=0*/); +@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytes( + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("size_t") long itemsize); +@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytes( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Cast("size_t") long itemsize, + @Cast("size_t") long storage_offset/*=0*/); +@Namespace("at::detail") public static native @Cast("size_t") long computeStorageNbytes( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Cast("size_t") long itemsize); +@Namespace("at::detail") public static native @ByVal SymInt computeStorageNbytes( + @ByVal SymIntArrayRef sizes, + @ByVal SymIntArrayRef strides, + @Const @ByRef SymInt itemsize, + @Const 
@ByRef(nullValue = "c10::SymInt(0)") SymInt storage_offset); +@Namespace("at::detail") public static native @ByVal SymInt computeStorageNbytes( + @ByVal SymIntArrayRef sizes, + @ByVal SymIntArrayRef strides, + @Const @ByRef SymInt itemsize); +@Namespace("at::detail") public static native @ByVal TensorBase empty_generic( + @ByVal LongArrayRef size, + Allocator allocator, + @ByVal DispatchKeySet ks, + ScalarType scalar_type, + @ByVal MemoryFormatOptional memory_format_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_generic( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + Allocator allocator, + @ByVal DispatchKeySet ks, + ScalarType scalar_type, + @ByVal MemoryFormatOptional memory_format_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_generic( + @ByVal LongArrayRef size, + @ByVal LongArrayRef stride, + Allocator allocator, + @ByVal DispatchKeySet ks, + ScalarType scalar_type); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_generic( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + Allocator allocator, + @ByVal DispatchKeySet ks, + ScalarType scalar_type); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_symint_generic( + @ByVal SymIntArrayRef size, + @ByVal SymIntArrayRef stride, + Allocator allocator, + @ByVal DispatchKeySet ks, + ScalarType scalar_type); -// Parsed from ATen/ops/_test_serialization_subcmul.h +@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( + @ByVal LongArrayRef size, + ScalarType dtype, + @Cast("bool") boolean pin_memory/*=false*/, + @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( + @ByVal LongArrayRef size, + ScalarType dtype); +@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + ScalarType dtype, + @Cast("bool") boolean pin_memory/*=false*/, + @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + ScalarType dtype); -// #pragma once +@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( + @ByVal LongArrayRef size, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal LayoutOptional layout_opt, + @ByVal DeviceOptional device_opt, + @ByVal BoolOptional pin_memory_opt, + @ByVal MemoryFormatOptional memory_format_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal LayoutOptional layout_opt, + @ByVal DeviceOptional device_opt, + @ByVal BoolOptional pin_memory_opt, + @ByVal MemoryFormatOptional memory_format_opt); -// @generated by torchgen/gen.py from Function.h +@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu(@ByVal LongArrayRef size, @Const @ByRef TensorOptions options); +@Namespace("at::detail") public static native @ByVal TensorBase empty_cpu(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const 
@ByRef TensorOptions options); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( + @ByVal LongArrayRef size, + @ByVal LongArrayRef stride, + ScalarType dtype, + @Cast("bool") boolean pin_memory/*=false*/); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( + @ByVal LongArrayRef size, + @ByVal LongArrayRef stride, + ScalarType dtype); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + ScalarType dtype, + @Cast("bool") boolean pin_memory/*=false*/); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + ScalarType dtype); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( + @ByVal LongArrayRef size, + @ByVal LongArrayRef stride, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal LayoutOptional layout_opt, + @ByVal DeviceOptional device_opt, + @ByVal BoolOptional pin_memory_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal LayoutOptional layout_opt, + @ByVal DeviceOptional device_opt, + @ByVal BoolOptional pin_memory_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( + @ByVal LongArrayRef size, + @ByVal LongArrayRef stride, + @Const @ByRef TensorOptions options); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_cpu( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @Const @ByRef TensorOptions options); -// #include +@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( + @ByVal LongArrayRef size, + ScalarType dtype, + @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( + @ByVal LongArrayRef size, + ScalarType dtype); +@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + ScalarType dtype, + @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + ScalarType dtype); +@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( + @ByVal LongArrayRef size, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal LayoutOptional layout_opt, + @ByVal DeviceOptional device_opt, + @ByVal BoolOptional pin_memory_opt, + @ByVal MemoryFormatOptional memory_format_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_meta( + @ByVal 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal LayoutOptional layout_opt, + @ByVal DeviceOptional device_opt, + @ByVal BoolOptional pin_memory_opt, + @ByVal MemoryFormatOptional memory_format_opt); -// aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor +@Namespace("at::detail") public static native @ByVal TensorBase empty_symint_meta( + @ByVal SymIntArrayRef size, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal LayoutOptional layout_opt, + @ByVal DeviceOptional device_opt, + @ByVal BoolOptional pin_memory_opt, + @ByVal MemoryFormatOptional memory_format_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_meta(@ByVal LongArrayRef size, @Const @ByRef TensorOptions options); +@Namespace("at::detail") public static native @ByVal TensorBase empty_meta(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef TensorOptions options); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, ScalarType dtype); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, ScalarType dtype); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( + @ByVal LongArrayRef size, + @ByVal LongArrayRef stride, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal LayoutOptional layout_opt, + @ByVal DeviceOptional device_opt, + @ByVal BoolOptional pin_memory_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal LayoutOptional layout_opt, + @ByVal DeviceOptional device_opt, + @ByVal BoolOptional pin_memory_opt); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( + @ByVal LongArrayRef size, + @ByVal LongArrayRef stride, + @Const @ByRef TensorOptions options); +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_meta( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @Const @ByRef TensorOptions options); -// Parsed from ATen/ops/_test_string_default.h +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_symint_meta( + @ByVal SymIntArrayRef size, + @ByVal SymIntArrayRef stride, + ScalarType dtype); -// #pragma once +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_symint_meta( + @ByVal SymIntArrayRef size, + @ByVal SymIntArrayRef stride, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal LayoutOptional layout_opt, + @ByVal DeviceOptional device_opt, + @ByVal BoolOptional pin_memory_opt); -// @generated by torchgen/gen.py from Function.h +@Namespace("at::detail") public static native @ByVal TensorBase empty_strided_symint_meta( + @ByVal SymIntArrayRef size, + @ByVal SymIntArrayRef stride, + @Const @ByRef TensorOptions options); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// 
#include -// #include + // namespace detail + // namespace at +// Parsed from ATen/TensorGeometry.h -// #include +// #pragma once +// #include +// #include -// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor +// Return whether the tensor geometry represented by `sizes` and `strides` is +// contiguous. Although we cache is_contiguous in tensor now, this is still useful +// because it allows checking if a particular geometry is contiguous without +// explicitly constructing a tensor, e.g., when you want to choose a kernel +// strategy based on whether a subgeometry is contiguous. +@Namespace("at") public static native @Cast("bool") boolean geometry_is_contiguous(@ByVal LongArrayRef sizes, @ByVal LongArrayRef strides); +@Namespace("at") public static native @Cast("bool") boolean geometry_is_contiguous(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); +// Targeting ../TensorGeometry.java + // namespace at -// Parsed from ATen/ops/_test_warn_in_autograd.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// Parsed from ATen/core/Formatting.h // #pragma once +// #include +// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include // #include -// #include -// #include -// #include -// #include - - - -// #include +// #include +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, Backend b); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Cast("c10::Backend") int b); +@Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Scalar s); +@Namespace("c10") public static native @StdString BytePointer toString(@Const @ByRef Scalar s); +@Namespace("at") public static native @Cast("std::ostream*") @ByRef Pointer print( + @Cast("std::ostream*") @ByRef Pointer stream, + @Const @ByRef Tensor tensor, + @Cast("int64_t") long linesize); +@Namespace("at") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Tensor t); +@Namespace("at") public static native void print(@Const @ByRef Tensor t, @Cast("int64_t") long linesize/*=80*/); +@Namespace("at") public static native void print(@Const @ByRef Tensor t); -// aten::_test_warn_in_autograd(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _test_warn_in_autograd(@Const @ByRef Tensor self); -// aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _test_warn_in_autograd_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
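To illustrate the `geometry_is_contiguous` binding above (a sketch, assuming the usual static placement in `org.bytedeco.pytorch.global.torch`): sizes `[2, 3]` with row-major strides `[3, 1]` describe a contiguous layout, while the transposed strides `[1, 2]` walk the same elements in a non-contiguous order:

    import org.bytedeco.pytorch.global.torch;

    public class GeometryDemo {
        public static void main(String[] args) {
            long[] sizes = {2, 3};
            // Row-major strides [3, 1]: the layout a freshly allocated tensor would use.
            System.out.println(torch.geometry_is_contiguous(sizes, 3, 1)); // true
            // Transposed strides [1, 2]: same elements, non-contiguous walk order.
            System.out.println(torch.geometry_is_contiguous(sizes, 1, 2)); // false
        }
    }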
-@Namespace("at") public static native @ByRef Tensor _test_warn_in_autograd_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// Parsed from ATen/Formatting.h +// #include -// Parsed from ATen/ops/_thnn_differentiable_gru_cell_backward.h +// Parsed from ATen/Utils.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include +// #include +// #include +// #include // #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// #include +// #include +// #include +// #include +// #include +// #include +// #define AT_DISALLOW_COPY_AND_ASSIGN(TypeName) +// TypeName(const TypeName&) = delete; +// void operator=(const TypeName&) = delete -// aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTuple _thnn_differentiable_gru_cell_backward(@Const @ByRef Tensor grad_hy, @Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional input_bias, @Const @ByRef TensorOptional hidden_bias); +@Namespace("at") public static native int _crash_if_asan(int arg0); +// Converts a TensorList (i.e. ArrayRef to vector of TensorImpl*) +// NB: This is ONLY used by legacy TH bindings, and ONLY used by cat. +// Once cat is ported entirely to ATen this can be deleted! +@Namespace("at") public static native @ByVal TensorImplVector checked_dense_tensor_list_unwrap( + @ByVal TensorArrayRef tensors, + @Cast("const char*") BytePointer name, + int pos, + DeviceType device_type, + ScalarType scalar_type); +@Namespace("at") public static native @ByVal TensorImplVector checked_dense_tensor_list_unwrap( + @ByVal TensorArrayRef tensors, + String name, + int pos, + @Cast("c10::DeviceType") byte device_type, + ScalarType scalar_type); + // namespace detail + // namespace at -// Parsed from ATen/ops/_thnn_differentiable_lstm_cell_backward.h +// Parsed from ATen/TensorUtils.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// #include +// #include +// #include +// #include +// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// These functions are NOT in Utils.h, because this file has a dep on Tensor.h +// #define TORCH_CHECK_TENSOR_ALL(cond, ...) +// TORCH_CHECK((cond)._is_all_true().item(), __VA_ARGS__); +// Targeting ../TensorArg.java -// #include +// Targeting ../TensorGeometryArg.java -// aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTuple _thnn_differentiable_lstm_cell_backward(@Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef TensorOptional input_bias, @Const @ByRef TensorOptional hidden_bias, @Const @ByRef Tensor cx, @Const @ByRef Tensor cy); +// A string describing which function did checks on its input +// arguments. 
+// TODO: Consider generalizing this into a call stack. +// The undefined convention: singular operators assume their arguments +// are defined, but functions which take multiple tensors will +// implicitly filter out undefined tensors (to make it easier to perform +// tests which should apply if the tensor is defined, and should not +// otherwise.) +// +// NB: This means that the n-ary operators take lists of TensorArg, +// not TensorGeometryArg, because the Tensor to TensorGeometry +// conversion will blow up if you have undefined tensors. -// Parsed from ATen/ops/_thnn_fused_gru_cell.h +@Namespace("at") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @ByVal TensorGeometryArg t); +@Namespace("at") public static native void checkDim( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef Tensor tensor, + @Cast("const char*") BytePointer name, + int pos, + @Cast("int64_t") long dim); +@Namespace("at") public static native void checkDim( + @Cast("at::CheckedFrom") String c, + @Const @ByRef Tensor tensor, + String name, + int pos, + @Cast("int64_t") long dim); +@Namespace("at") public static native void checkDim(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorGeometryArg t, @Cast("int64_t") long dim); +@Namespace("at") public static native void checkDim(@Cast("at::CheckedFrom") String c, @Const @ByRef TensorGeometryArg t, @Cast("int64_t") long dim); +// NB: this is an inclusive-exclusive range +@Namespace("at") public static native void checkDimRange( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorGeometryArg t, + @Cast("int64_t") long dim_start, + @Cast("int64_t") long dim_end); +@Namespace("at") public static native void checkDimRange( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorGeometryArg t, + @Cast("int64_t") long dim_start, + @Cast("int64_t") long dim_end); +@Namespace("at") public static native void checkSameDim( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorGeometryArg t1, + @Const @ByRef TensorGeometryArg t2); +@Namespace("at") public static native void checkSameDim( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorGeometryArg t1, + @Const @ByRef TensorGeometryArg t2); +@Namespace("at") public static native void checkContiguous(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorGeometryArg t); +@Namespace("at") public static native void checkContiguous(@Cast("at::CheckedFrom") String c, @Const @ByRef TensorGeometryArg t); +@Namespace("at") public static native void checkAllContiguous(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef ts); +@Namespace("at") public static native void checkAllContiguous(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef ts); +@Namespace("at") public static native void checkSize( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorGeometryArg t, + @ByVal LongArrayRef sizes); +@Namespace("at") public static native void checkSize( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorGeometryArg t, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
sizes); +@Namespace("at") public static native void checkSize_symint( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorGeometryArg t, + @ByVal SymIntArrayRef sizes); +@Namespace("at") public static native void checkSize_symint( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorGeometryArg t, + @ByVal SymIntArrayRef sizes); +@Namespace("at") public static native void checkSize( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorGeometryArg t, + @Cast("int64_t") long dim, + @Cast("int64_t") long size); +@Namespace("at") public static native void checkSize( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorGeometryArg t, + @Cast("int64_t") long dim, + @Cast("int64_t") long size); +@Namespace("at") public static native void checkSize_symint( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorGeometryArg t, + @Cast("int64_t") long dim, + @ByVal SymInt size); +@Namespace("at") public static native void checkSize_symint( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorGeometryArg t, + @Cast("int64_t") long dim, + @ByVal SymInt size); +@Namespace("at") public static native void checkNumel( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorGeometryArg t, + @Cast("int64_t") long numel); +@Namespace("at") public static native void checkNumel( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorGeometryArg t, + @Cast("int64_t") long numel); -// #pragma once +@Namespace("at") public static native void checkAllSameNumel(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef tensors); +@Namespace("at") public static native void checkAllSameNumel(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef tensors); +@Namespace("at") public static native void checkScalarType(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorArg t, ScalarType s); +@Namespace("at") public static native void checkScalarType(@Cast("at::CheckedFrom") String c, @Const @ByRef TensorArg t, ScalarType s); +@Namespace("at") public static native void checkScalarTypes( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorArg t, + @ByVal ScalarTypeArrayRef l); +@Namespace("at") public static native void checkScalarTypes( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorArg t, + @ByVal ScalarTypeArrayRef l); +@Namespace("at") public static native void checkSameGPU( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorArg t1, + @Const @ByRef TensorArg t2); +@Namespace("at") public static native void checkSameGPU( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorArg t1, + @Const @ByRef TensorArg t2); +@Namespace("at") public static native void checkAllSameGPU(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef tensors); +@Namespace("at") public static native void checkAllSameGPU(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef tensors); +@Namespace("at") public static native void checkSameType( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorArg t1, + @Const @ByRef TensorArg t2); +@Namespace("at") public static native void checkSameType( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorArg t1, + @Const @ByRef TensorArg t2); +@Namespace("at") public static native void checkAllSameType(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef tensors); +@Namespace("at") public static native void checkAllSameType(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef tensors); +@Namespace("at") public static native void 
checkSameSize( + @Cast("at::CheckedFrom") BytePointer c, + @Const @ByRef TensorArg t1, + @Const @ByRef TensorArg t2); +@Namespace("at") public static native void checkSameSize( + @Cast("at::CheckedFrom") String c, + @Const @ByRef TensorArg t1, + @Const @ByRef TensorArg t2); +@Namespace("at") public static native void checkDefined(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef TensorArg t); +@Namespace("at") public static native void checkDefined(@Cast("at::CheckedFrom") String c, @Const @ByRef TensorArg t); +@Namespace("at") public static native void checkAllDefined(@Cast("at::CheckedFrom") BytePointer c, @ByVal TensorArgArrayRef t); +@Namespace("at") public static native void checkAllDefined(@Cast("at::CheckedFrom") String c, @ByVal TensorArgArrayRef t); -// @generated by torchgen/gen.py from Function.h +// FixMe: does TensorArg slow things down? +@Namespace("at") public static native void checkBackend( + @Cast("at::CheckedFrom") BytePointer c, + @ByVal TensorArrayRef t, + @ByVal Backend backend); +@Namespace("at") public static native void checkBackend( + @Cast("at::CheckedFrom") String c, + @ByVal TensorArrayRef t, + @ByVal Backend backend); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("at") public static native void checkDeviceType( + @Cast("at::CheckedFrom") BytePointer c, + @ByVal TensorArrayRef tensors, + @ByVal DeviceType device_type); +@Namespace("at") public static native void checkDeviceType( + @Cast("at::CheckedFrom") String c, + @ByVal TensorArrayRef tensors, + @ByVal DeviceType device_type); +@Namespace("at") public static native void checkLayout(@Cast("at::CheckedFrom") BytePointer c, @Const @ByRef Tensor t, Layout layout); +@Namespace("at") public static native void checkLayout(@Cast("at::CheckedFrom") String c, @Const @ByRef Tensor t, @Cast("c10::Layout") byte layout); +@Namespace("at") public static native void checkLayout( + @Cast("at::CheckedFrom") BytePointer c, + @ByVal TensorArrayRef tensors, + @ByVal Layout layout); +@Namespace("at") public static native void checkLayout( + @Cast("at::CheckedFrom") String c, + @ByVal TensorArrayRef tensors, + @ByVal Layout layout); -// #include +// Methods for getting data_ptr if tensor is defined +@Namespace("at") public static native Pointer maybe_data_ptr(@Const @ByRef Tensor tensor); +@Namespace("at") public static native Pointer maybe_data_ptr(@Const @ByRef TensorArg tensor); +@Namespace("at") public static native void check_dim_size( + @Const @ByRef Tensor tensor, + @Cast("int64_t") long dim, + @Cast("int64_t") long dim_size, + @Cast("int64_t") long size); +@Namespace("at::detail") public static native @ByVal @Cast("std::vector*") LongVector defaultStrides(@ByVal LongArrayRef sizes); +@Namespace("at::detail") public static native @ByVal @Cast("std::vector*") LongVector defaultStrides(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); -// aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? 
hidden_bias=None) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _thnn_fused_gru_cell(@Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor hx, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional input_bias, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional hidden_bias); -@Namespace("at") public static native @ByVal TensorTensorTuple _thnn_fused_gru_cell(@Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor hx); +@Namespace("at::detail") public static native @ByVal LongVectorOptional computeStride( + @ByVal LongArrayRef oldshape, + @ByVal LongArrayRef oldstride, + @ByVal LongArrayRef newshape); +@Namespace("at::detail") public static native @ByVal LongVectorOptional computeStride( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldshape, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldstride, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... newshape); -// aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _thnn_fused_gru_cell_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor hx, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional input_bias, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional hidden_bias); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _thnn_fused_gru_cell_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor hx); -// aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _thnn_fused_gru_cell_outf(@Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional input_bias, @Const @ByRef TensorOptional hidden_bias, @ByRef Tensor out0, @ByRef Tensor out1); +@Namespace("at::detail") public static native @ByVal SymDimVectorOptional computeStride( + @ByVal SymIntArrayRef oldshape, + @ByVal SymIntArrayRef oldstride, + @ByVal SymIntArrayRef newshape); +@Namespace("at::detail") public static native @ByVal DimVectorOptional computeStride( + @ByVal LongArrayRef oldshape, + @ByVal LongArrayRef oldstride, + @Const @ByRef DimVector newshape); +@Namespace("at::detail") public static native @ByVal DimVectorOptional computeStride( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldshape, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] oldstride, + @Const @ByRef DimVector newshape); + // namespace detail + // namespace at -// Parsed from ATen/ops/_thnn_fused_gru_cell_backward.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// Parsed from ATen/TracerMode.h // #pragma once +// #include +// #include +// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// NOTE [Tracing Mode Switches] +// +// Historically, the tracing function was controlled by two switches: +// +// - `AutoDispatchBelowADInplaceOrView` guard +// +// The tracing function used to be script-generated inside `VariableType_*.cpp` +// kernels, sharing the same `Autograd` dispatch key with the autograd function. +// Therefore, before the tracing function was moved out of VariableType, the +// `AutoDispatchBelowADInplaceOrView` guard could also disable tracing as a +// side effect of disabling `Autograd` dispatching. +// +// - `setTracingState()` API in `torch/csrc/jit/frontend/tracer.h` +// +// It stores tracing data in a `TracingState` object in TLS. If the +// `TracingState` object in TLS is `null`, then tracing is paused. +// +// The `TracingState` object is created in `tracer::trace()` - the main +// entrance of the tracing function. It's temporarily set to `null` inside +// generated VariableType (now TraceType) to bypass tracing for intermediate +// ops (ops being called by other ops). After the intermediate op call +// finishes it's set back to the original `TracingState` object. +// +// The `TracingState` object in TLS can also be read/written via its Python +// binding in `python_tracer.cpp`, and the `get/setTracingState()` C++ APIs, +// which are also exposed as `TORCH_API`. +// +// Two new switches were introduced since the tracing function was moved out of +// VariableType: +// +// - `tracer::impl::set_dispatch_enabled()` API +// +// Unlike the special `Autograd` dispatch key, which is included in the dispatch +// key set by default, the `Tracer` dispatch key is off by default. The +// dispatching switch can be toggled via this new API. +// +// - `tracer::impl::NoTracerDispatchMode` guard +// +// It's used to cover the old semantics of `AutoDispatchBelowADInplaceOrView` +// after tracing was moved out of VariableType.
+// +// Before the tracing function was moved out of VariableType, tracing was enabled +// when the following conditions were satisfied: +// +// 1) `TracingState` object in TLS != null; +// - Either inside the execution scope of `tracer::trace()`, or +// - Eagerly called `setTracingState()` with a non-null object. +// 2) Not inside `AutoDispatchBelowADInplaceOrView` scope; +// +// After: +// +// 1) `TracingState` object in TLS != null; +// 2) Has called `tracer::impl::set_dispatch_enabled(true)`; +// 3) Not inside `tracer::impl::NonDispatchGuard` scope; +// +// [TODOs] +// +// - `setTracingState()` vs. `tracer::impl::set_dispatch_enabled()` +// +// Currently `set_dispatch_enabled()` is set/unset inside `setTracingState()` +// to keep the semantics exactly the same as before - it's confusing to keep +// both switches, though. We should consider simplifying/limiting the exposed +// `setTracingState()` Python/C++ APIs (and other APIs calling it) so that +// these two can be unified. +// +// - `AutoDispatchBelowADInplaceOrView` vs. +// `tracer::impl::NoTracerDispatchMode` +// +// We don't need to always set both guards together to keep semantics +// unchanged. For the following use cases of `AutoDispatchBelowADInplaceOrView` +// we don't need to set the new tracer guard: +// +// * Script-generated VariableType kernels. The guard is not necessary as +// tracing is already disabled explicitly by `setTracingState(null)` in +// generated TraceType kernels - we could keep it as is or use the new guard +// instead. +// +// * Custom ops. Will be handled by the fallback kernel for `Tracer`. +// +// * Functions that are not likely to be called in a tracing context (no python +// binding / not an operator), e.g.: all mobile forward() wrappers, test +// binaries, etc. +// +// * Where new threads are spawned, e.g.: ATen/native/ConvolutionMM2d.cpp. +// It's not necessary as tracing is off by default. +// +// For the rest of the cases we might need to have both: +// +// * Functions that might be reachable from eager mode python (especially +// factory methods), e.g.: +// `internal_new_from_data()` in `torch/csrc/utils/tensor_new.cpp`. +// Without the new guard it will add `aten::empty` to the traced graph. +// +// * Some manually maintained functions, e.g.: +// `torch/csrc/autograd/VariableTypeManual.cpp`. +// Set the new guard if it's not obvious whether `setTracingState(null)` +// has been called before it reaches the `AutoDispatchBelowADInplaceOrView` +// guard. +// +// We might need to tweak the usage of the new guard to optimize/fix things. +// It should only affect the correctness of the tracing function, because the +// guard is essentially a no-op when the master `setTracingState()` switch is +// off. +// TODO: move this from `at::` to `jit::torch::` after +// `aten/src/ATen/cpp_custom_type_hack.h` is removed.
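Given the switches described in this note, here is a minimal Java sketch of toggling the `Tracer` dispatch key through the `at::tracer::impl` bindings declared just below (assuming they surface as static methods of `org.bytedeco.pytorch.global.torch`; `TracerDispatchDemo` is a hypothetical name):

    import org.bytedeco.pytorch.global.torch;

    public class TracerDispatchDemo {
        public static void main(String[] args) {
            boolean before = torch.is_dispatch_enabled(); // `Tracer` key is off by default
            torch.set_dispatch_enabled(true);             // include `Tracer` in dispatch
            try {
                // ... run work that should hit the Tracer kernels here ...
            } finally {
                torch.set_dispatch_enabled(before);       // restore the previous state
            }
        }
    }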
+@Namespace("at::tracer::impl") public static native @Cast("bool") boolean is_dispatch_enabled(); +@Namespace("at::tracer::impl") public static native void set_dispatch_enabled(@Cast("bool") boolean enabled); -// #include + // namespace impl + // namespace tracer + // namespace at -// aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTuple _thnn_fused_gru_cell_backward(@Const @ByRef Tensor grad_hy, @Const @ByRef Tensor workspace, @Cast("bool") boolean has_bias); +// Parsed from ATen/core/Reduction.h -// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _thnn_fused_gru_cell_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor grad_hy, @Const @ByRef Tensor workspace, @Cast("bool") boolean has_bias); -// aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _thnn_fused_gru_cell_backward_outf(@Const @ByRef Tensor grad_hy, @Const @ByRef Tensor workspace, @Cast("bool") boolean has_bias, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); +// #pragma once +// NB: Keep this in sync with Reduction class in torch/nn/_reduction.py +// These constants control the reduction behavior of loss functions. +// Ideally, this would be a scoped enum, but jit doesn't support that +@Namespace("at::Reduction") public enum Reduction { + None(0), // Do not reduce + Mean(1), // (Possibly weighted) mean of losses + Sum(2), // Sum losses + END(3); + public final int value; + private Reduction(int v) { this.value = v; } + private Reduction(Reduction e) { this.value = e.value; } + public Reduction intern() { for (Reduction e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + // namespace Reduction + // namespace at -// Parsed from ATen/ops/_thnn_fused_lstm_cell.h +// Parsed from ATen/ops/abs.h // #pragma once @@ -32585,23 +18242,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::abs(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor abs(@Const @ByRef Tensor self); -// aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _thnn_fused_lstm_cell(@Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor cx, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional input_bias, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional hidden_bias); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _thnn_fused_lstm_cell(@Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor cx); +// aten::abs_(Tensor(a!) self) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor abs_(@ByRef Tensor self); -// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _thnn_fused_lstm_cell_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor cx, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional input_bias, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional hidden_bias); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _thnn_fused_lstm_cell_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor cx); -// aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _thnn_fused_lstm_cell_outf(@Const @ByRef Tensor input_gates, @Const @ByRef Tensor hidden_gates, @Const @ByRef Tensor cx, @Const @ByRef TensorOptional input_bias, @Const @ByRef TensorOptional hidden_bias, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor abs_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor abs_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/_thnn_fused_lstm_cell_backward.h +// Parsed from ATen/ops/absolute.h // #pragma once @@ -32622,16 +18280,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::absolute(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor absolute(@Const @ByRef Tensor self); -// aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTuple _thnn_fused_lstm_cell_backward(@Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor cx, @Const @ByRef Tensor cy, @Const @ByRef Tensor workspace, @Cast("bool") boolean has_bias); +// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor absolute_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor absolute_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/_thnn_fused_lstm_cell_backward_impl.h +// Parsed from ATen/ops/acos.h // #pragma once @@ -32652,21 +18315,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::acos(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor acos(@Const @ByRef Tensor self); -// aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? 
grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _thnn_fused_lstm_cell_backward_impl(@Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor cx, @Const @ByRef Tensor cy, @Const @ByRef Tensor workspace, @Cast("bool") boolean has_bias); +// aten::acos_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor acos_(@ByRef Tensor self); -// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _thnn_fused_lstm_cell_backward_impl_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor cx, @Const @ByRef Tensor cy, @Const @ByRef Tensor workspace, @Cast("bool") boolean has_bias); -// aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _thnn_fused_lstm_cell_backward_impl_outf(@Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor cx, @Const @ByRef Tensor cy, @Const @ByRef Tensor workspace, @Cast("bool") boolean has_bias, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor acos_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor acos_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/_to_copy.h +// Parsed from ATen/ops/acosh.h // #pragma once @@ -32687,25 +18353,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::acosh(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor acosh(@Const @ByRef Tensor self); -// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _to_copy(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @Cast("bool") boolean non_blocking/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor _to_copy(@Const @ByRef Tensor self); -// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _to_copy(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @Cast("bool") boolean non_blocking, @ByVal MemoryFormatOptional memory_format); +// aten::acosh_(Tensor(a!) self) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor acosh_(@ByRef Tensor self); -// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _to_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean non_blocking/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor _to_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _to_copy_outf(@Const @ByRef Tensor self, @Cast("bool") boolean non_blocking, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor acosh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor acosh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/_to_cpu.h +// Parsed from ATen/ops/adaptive_avg_pool1d.h // #pragma once @@ -32726,16 +18391,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::_to_cpu(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector _to_cpu(@ByVal TensorArrayRef tensors); +// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// Parsed from ATen/ops/_to_dense.h +// Parsed from ATen/ops/adaptive_avg_pool2d.h // #pragma once @@ -32756,54 +18422,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_to_dense.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _to_dense_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor _to_dense_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_to_dense.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _to_dense_outf(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// #include +// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// Parsed from ATen/ops/_transform_bias_rescale_qkv.h +// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByRef Tensor out); -// #include +// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _transform_bias_rescale_qkv(@Const @ByRef Tensor qkv, @Const @ByRef Tensor qkv_bias, @Cast("int64_t") long num_heads); +// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool2d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size); -// aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _transform_bias_rescale_qkv_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor qkv, @Const @ByRef Tensor qkv_bias, @Cast("int64_t") long num_heads); -// aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _transform_bias_rescale_qkv_outf(@Const @ByRef Tensor qkv, @Const @ByRef Tensor qkv_bias, @Cast("int64_t") long num_heads, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/_transformer_decoder_only_layer_fwd.h +// Parsed from ATen/ops/adaptive_avg_pool3d.h // #pragma once @@ -32824,60 +18476,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? 
mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _transformer_decoder_only_layer_fwd(@Const @ByRef Tensor src, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Cast("bool") boolean use_gelu, @Cast("bool") boolean norm_first, double eps, @Const @ByRef Tensor norm_weight_1, @Const @ByRef Tensor norm_bias_1, @Const @ByRef Tensor norm_weight_2, @Const @ByRef Tensor norm_bias_2, @Const @ByRef Tensor ffn_weight_1, @Const @ByRef Tensor ffn_bias_1, @Const @ByRef Tensor ffn_weight_2, @Const @ByRef Tensor ffn_bias_2, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional mask, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional incr_key, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional incr_value); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _transformer_decoder_only_layer_fwd(@Const @ByRef Tensor src, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Cast("bool") boolean use_gelu, @Cast("bool") boolean norm_first, double eps, @Const @ByRef Tensor norm_weight_1, @Const @ByRef Tensor norm_bias_1, @Const @ByRef Tensor norm_weight_2, @Const @ByRef Tensor norm_bias_2, @Const @ByRef Tensor ffn_weight_1, @Const @ByRef Tensor ffn_bias_1, @Const @ByRef Tensor ffn_weight_2, @Const @ByRef Tensor ffn_bias_2); - -// aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _transformer_decoder_only_layer_fwd_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor src, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Cast("bool") boolean use_gelu, @Cast("bool") boolean norm_first, double eps, @Const @ByRef Tensor norm_weight_1, @Const @ByRef Tensor norm_bias_1, @Const @ByRef Tensor norm_weight_2, @Const @ByRef Tensor norm_bias_2, @Const @ByRef Tensor ffn_weight_1, @Const @ByRef Tensor ffn_bias_1, @Const @ByRef Tensor ffn_weight_2, @Const @ByRef Tensor ffn_bias_2, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional mask, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional incr_key, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional incr_value); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _transformer_decoder_only_layer_fwd_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor src, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Cast("bool") boolean use_gelu, @Cast("bool") boolean norm_first, double eps, @Const @ByRef Tensor norm_weight_1, @Const @ByRef Tensor norm_bias_1, @Const @ByRef Tensor norm_weight_2, @Const @ByRef Tensor norm_bias_2, @Const @ByRef Tensor ffn_weight_1, @Const @ByRef Tensor ffn_bias_1, @Const @ByRef Tensor ffn_weight_2, @Const @ByRef Tensor ffn_bias_2); -// aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _transformer_decoder_only_layer_fwd_outf(@Const @ByRef Tensor src, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Cast("bool") boolean use_gelu, @Cast("bool") boolean norm_first, double eps, @Const @ByRef Tensor norm_weight_1, @Const @ByRef Tensor norm_bias_1, @Const @ByRef Tensor norm_weight_2, @Const @ByRef Tensor norm_bias_2, @Const @ByRef Tensor ffn_weight_1, @Const @ByRef Tensor ffn_bias_1, @Const @ByRef Tensor ffn_weight_2, @Const @ByRef Tensor ffn_bias_2, @Const @ByRef TensorOptional mask, @Const @ByRef TensorOptional incr_key, @Const @ByRef TensorOptional incr_value, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// #include +// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) 
out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// Parsed from ATen/ops/_transformer_encoder_layer_fwd.h +// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByRef Tensor out); -// #include +// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? 
mask_type=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _transformer_encoder_layer_fwd(@Const @ByRef Tensor src, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Cast("bool") boolean use_gelu, @Cast("bool") boolean norm_first, double eps, @Const @ByRef Tensor norm_weight_1, @Const @ByRef Tensor norm_bias_1, @Const @ByRef Tensor norm_weight_2, @Const @ByRef Tensor norm_bias_2, @Const @ByRef Tensor ffn_weight_1, @Const @ByRef Tensor ffn_bias_1, @Const @ByRef Tensor ffn_weight_2, @Const @ByRef Tensor ffn_bias_2, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional mask, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional mask_type); -@Namespace("at") public static native @ByVal Tensor _transformer_encoder_layer_fwd(@Const @ByRef Tensor src, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Cast("bool") boolean use_gelu, @Cast("bool") boolean norm_first, double eps, @Const @ByRef Tensor norm_weight_1, @Const @ByRef Tensor norm_bias_1, @Const @ByRef Tensor norm_weight_2, @Const @ByRef Tensor norm_bias_2, @Const @ByRef Tensor ffn_weight_1, @Const @ByRef Tensor ffn_bias_1, @Const @ByRef Tensor ffn_weight_2, @Const @ByRef Tensor ffn_bias_2); +// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor +@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool3d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size); -// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _transformer_encoder_layer_fwd_out(@ByRef Tensor out, @Const @ByRef Tensor src, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Cast("bool") boolean use_gelu, @Cast("bool") boolean norm_first, double eps, @Const @ByRef Tensor norm_weight_1, @Const @ByRef Tensor norm_bias_1, @Const @ByRef Tensor norm_weight_2, @Const @ByRef Tensor norm_bias_2, @Const @ByRef Tensor ffn_weight_1, @Const @ByRef Tensor ffn_bias_1, @Const @ByRef Tensor ffn_weight_2, @Const @ByRef Tensor ffn_bias_2, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional mask, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional mask_type); -@Namespace("at") public static native @ByRef Tensor _transformer_encoder_layer_fwd_out(@ByRef Tensor out, @Const @ByRef Tensor src, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Cast("bool") boolean use_gelu, @Cast("bool") boolean norm_first, double eps, @Const @ByRef Tensor norm_weight_1, @Const @ByRef Tensor norm_bias_1, @Const @ByRef Tensor norm_weight_2, @Const @ByRef Tensor norm_bias_2, @Const @ByRef Tensor ffn_weight_1, @Const @ByRef Tensor ffn_bias_1, @Const @ByRef Tensor ffn_weight_2, @Const @ByRef Tensor ffn_bias_2); -// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _transformer_encoder_layer_fwd_outf(@Const @ByRef Tensor src, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_heads, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Cast("bool") boolean use_gelu, @Cast("bool") boolean norm_first, double eps, @Const @ByRef Tensor norm_weight_1, @Const @ByRef Tensor norm_bias_1, @Const @ByRef Tensor norm_weight_2, @Const @ByRef Tensor norm_bias_2, @Const @ByRef Tensor ffn_weight_1, @Const @ByRef Tensor ffn_bias_1, @Const @ByRef Tensor ffn_weight_2, @Const @ByRef Tensor ffn_bias_2, @Const @ByRef TensorOptional mask, @ByVal LongOptional mask_type, @ByRef Tensor out); -// Parsed from ATen/ops/_trilinear.h +// Parsed from ATen/ops/adaptive_avg_pool3d_backward.h // #pragma once @@ -32898,28 +18530,18 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor _trilinear(@Const @ByRef Tensor i1, @Const @ByRef Tensor i2, @Const @ByRef Tensor i3, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand1, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand2, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand3, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sumdim, @Cast("int64_t") long unroll_dim/*=1*/); -@Namespace("at") public static native @ByVal Tensor _trilinear(@Const @ByRef Tensor i1, @Const @ByRef Tensor i2, @Const @ByRef Tensor i3, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand1, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand2, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand3, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sumdim); -@Namespace("at") public static native @ByVal Tensor _trilinear(@Const @ByRef Tensor i1, @Const @ByRef Tensor i2, @Const @ByRef Tensor i3, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand1, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand2, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand3, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sumdim, @Cast("int64_t") long unroll_dim/*=1*/); -@Namespace("at") public static native @ByVal Tensor _trilinear(@Const @ByRef Tensor i1, @Const @ByRef Tensor i2, @Const @ByRef Tensor i3, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand1, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand2, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand3, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sumdim); -// aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _trilinear_out(@ByRef Tensor out, @Const @ByRef Tensor i1, @Const @ByRef Tensor i2, @Const @ByRef Tensor i3, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand1, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand2, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand3, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sumdim, @Cast("int64_t") long unroll_dim/*=1*/); -@Namespace("at") public static native @ByRef Tensor _trilinear_out(@ByRef Tensor out, @Const @ByRef Tensor i1, @Const @ByRef Tensor i2, @Const @ByRef Tensor i3, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand1, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand2, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand3, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sumdim); -@Namespace("at") public static native @ByRef Tensor _trilinear_out(@ByRef Tensor out, @Const @ByRef Tensor i1, @Const @ByRef Tensor i2, @Const @ByRef Tensor i3, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand1, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand2, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand3, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sumdim, @Cast("int64_t") long unroll_dim/*=1*/); -@Namespace("at") public static native @ByRef Tensor _trilinear_out(@ByRef Tensor out, @Const @ByRef Tensor i1, @Const @ByRef Tensor i2, @Const @ByRef Tensor i3, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand1, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand2, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand3, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sumdim); -// aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _trilinear_outf(@Const @ByRef Tensor i1, @Const @ByRef Tensor i2, @Const @ByRef Tensor i3, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand1, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand2, @ByVal @Cast("c10::ArrayRef*") LongArrayRef expand3, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sumdim, @Cast("int64_t") long unroll_dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _trilinear_outf(@Const @ByRef Tensor i1, @Const @ByRef Tensor i2, @Const @ByRef Tensor i3, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand1, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand2, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] expand3, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sumdim, @Cast("int64_t") long unroll_dim, @ByRef Tensor out); +// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor grad_input); -// Parsed from ATen/ops/_triton_multi_head_attention.h +// Parsed from ATen/ops/adaptive_max_pool1d.h // #pragma once @@ -32940,23 +18562,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _triton_multi_head_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional mask); -@Namespace("at") public static native @ByVal Tensor _triton_multi_head_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias); -// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _triton_multi_head_attention_out(@ByRef Tensor out, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional mask); -@Namespace("at") public static native @ByRef Tensor _triton_multi_head_attention_out(@ByRef Tensor out, @Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias); -// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _triton_multi_head_attention_outf(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Cast("int64_t") long embed_dim, @Cast("int64_t") long num_head, @Const @ByRef Tensor qkv_weight, @Const @ByRef Tensor qkv_bias, @Const @ByRef Tensor proj_weight, @Const @ByRef Tensor proj_bias, @Const @ByRef TensorOptional mask, @ByRef Tensor out); +// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); -// Parsed from ATen/ops/_triton_scaled_dot_attention.h +// Parsed from ATen/ops/adaptive_max_pool2d.h // #pragma once @@ -32977,23 +18593,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor -@Namespace("at") public static native @ByVal Tensor _triton_scaled_dot_attention(@Const @ByRef Tensor q, @Const @ByRef Tensor k, @Const @ByRef Tensor v, double dropout_p/*=0.0*/); -@Namespace("at") public static native @ByVal Tensor _triton_scaled_dot_attention(@Const @ByRef Tensor q, @Const @ByRef Tensor k, @Const @ByRef Tensor v); +// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByRef Tensor out, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out, @ByRef Tensor indices); -// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _triton_scaled_dot_attention_out(@ByRef Tensor out, @Const @ByRef Tensor q, @Const @ByRef Tensor k, @Const @ByRef Tensor v, double dropout_p/*=0.0*/); -@Namespace("at") public static native @ByRef Tensor _triton_scaled_dot_attention_out(@ByRef Tensor out, @Const @ByRef Tensor q, @Const @ByRef Tensor k, @Const @ByRef Tensor v); -// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _triton_scaled_dot_attention_outf(@Const @ByRef Tensor q, @Const @ByRef Tensor k, @Const @ByRef Tensor v, double dropout_p, @ByRef Tensor out); +// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); -// Parsed from ATen/ops/_unique.h +// Parsed from ATen/ops/adaptive_max_pool2d_backward.h // #pragma once @@ -33014,23 +18631,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _unique(@Const @ByRef Tensor self, @Cast("bool") boolean sorted/*=true*/, @Cast("bool") boolean return_inverse/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple _unique(@Const @ByRef Tensor self); +// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor adaptive_max_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices); +// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor adaptive_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _unique_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Cast("bool") boolean sorted/*=true*/, @Cast("bool") boolean return_inverse/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _unique_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self); -// aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _unique_outf(@Const @ByRef Tensor self, @Cast("bool") boolean sorted, @Cast("bool") boolean return_inverse, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor +@Namespace("at") public static native @ByVal Tensor adaptive_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices); -// Parsed from ATen/ops/_unique2.h +// Parsed from ATen/ops/adaptive_max_pool3d.h // #pragma once @@ -33051,23 +18666,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _unique2(@Const @ByRef Tensor self, @Cast("bool") boolean sorted/*=true*/, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple _unique2(@Const @ByRef Tensor self); +// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByRef Tensor out, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out, @ByRef Tensor indices); -// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _unique2_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("bool") boolean sorted/*=true*/, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _unique2_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self); -// aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _unique2_outf(@Const @ByRef Tensor self, @Cast("bool") boolean sorted, @Cast("bool") boolean return_inverse, @Cast("bool") boolean return_counts, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// Parsed from ATen/ops/_unpack_dual.h +// Parsed from ATen/ops/adaptive_max_pool3d_backward.h // #pragma once @@ -33088,16 +18704,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor adaptive_max_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices); +// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor adaptive_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent) -@Namespace("at") public static native @ByVal TensorTensorTuple _unpack_dual(@Const @ByRef Tensor dual, @Cast("int64_t") long level); +// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor +@Namespace("at") public static native @ByVal Tensor adaptive_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices); -// Parsed from ATen/ops/_unsafe_view.h +// Parsed from ATen/ops/add.h // #pragma once @@ -33118,40 +18739,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor -@Namespace("at") public static native @ByVal Tensor _unsafe_view(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor _unsafe_view(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor -@Namespace("at") public static native @ByVal Tensor _unsafe_view_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size); - - -// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _unsafe_view_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor _unsafe_view_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _unsafe_view_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _unsafe_view_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +// #include -// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _unsafe_view_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size); +// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor add(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor add(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor add_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _unsafe_view_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByRef Tensor out); +// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor add(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor add(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor add_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// Parsed from ATen/ops/_upsample_bicubic2d_aa.h +// Parsed from ATen/ops/addbmm.h // #pragma once @@ -33172,55 +18786,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); - - -// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); - - -// aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +// #include -// aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); +// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addbmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor addbmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2); +// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor addbmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor addbmm(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor addbmm(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2); -// Parsed from ATen/ops/_upsample_bicubic2d_aa_backward.h +// Parsed from ATen/ops/addcdiv.h // #pragma once @@ -33241,46 +18823,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); - - -// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); - - -// aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bicubic2d_aa_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +// #include -// aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bicubic2d_aa_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); +// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addcdiv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); +@Namespace("at") public static native @ByRef Tensor addcdiv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2); +// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor addcdiv_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef Scalar value, @ByRef Tensor out); +// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor addcdiv(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); +@Namespace("at") public static native @ByVal Tensor addcdiv(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2); -// Parsed from ATen/ops/_upsample_bilinear2d_aa.h +// Parsed from ATen/ops/addcmul.h // #pragma once @@ -33301,55 +18860,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); - - -// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); - - -// aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +// #include -// aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); +// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addcmul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); +@Namespace("at") public static native @ByRef Tensor addcmul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2); +// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addcmul_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef Scalar value, @ByRef Tensor out); +// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor addcmul(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); +@Namespace("at") public static native @ByVal Tensor addcmul(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2); -// Parsed from ATen/ops/_upsample_bilinear2d_aa_backward.h +// Parsed from ATen/ops/addmm.h // #pragma once @@ -33370,46 +18897,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
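The addcdiv/addcmul bindings above expose aten's fused multiply-divide and multiply-add as plain static methods. A minimal Java sketch of how they are called, assuming the usual static import of org.bytedeco.pytorch.global.torch and its varargs ones() factory (both assumptions, not part of this hunk):

    import org.bytedeco.pytorch.Scalar;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class AddcOpsSketch {
        public static void main(String[] args) {
            Tensor self = ones(2, 3);  // assumed varargs factory from the same generated class
            Tensor t1 = ones(2, 3);
            Tensor t2 = ones(2, 3);

            // Functional forms; the trailing Scalar value defaults to 1 when omitted.
            Tensor a = addcmul(self, t1, t2);                  // self + 1 * t1 * t2
            Tensor b = addcdiv(self, t1, t2, new Scalar(0.5)); // self + 0.5 * t1 / t2

            // Pre-allocated output form: the result is written into out.
            Tensor out = ones(2, 3);
            addcmul_out(out, self, t1, t2);
        }
    }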
-@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); - - -// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); - - -// aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_bilinear2d_aa_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +// #include -// aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_bilinear2d_aa_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); +// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor addmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); +// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor addmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor addmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// Parsed from ATen/ops/_upsample_nearest_exact1d.h +// Parsed from ATen/ops/addmv.h // #pragma once @@ -33430,55 +18934,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) 
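The addmm binding added here computes beta * self + alpha * (mat1 @ mat2), with both scalars defaulting to 1. A hedged sketch of the two overloads, under the same assumptions about tensor factories as above (the varargs rand() factory and the Scalar(double) constructor are assumptions):

    Tensor bias = ones(2, 4);
    Tensor m1 = rand(2, 3);   // assumed varargs factory
    Tensor m2 = rand(3, 4);
    Tensor y1 = addmm(bias, m1, m2);           // beta = alpha = 1: bias + m1 @ m2
    Tensor y2 = addmm(bias, m1, m2,
            new Scalar(0.0), new Scalar(2.0)); // drops bias, doubles the product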
-@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); - - -// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal DoubleOptional scales, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales, @ByRef Tensor out); - - -// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size); - - -// aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal DoubleOptional scales, @ByRef Tensor out); - +// #include -// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); +// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor addmv(@Const @ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor addmv(@Const @ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec); -// aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size); +// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addmv_(@ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor addmv_(@ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec); +// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addmv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor addmv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec); +// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addmv_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// Parsed from ATen/ops/_upsample_nearest_exact1d_backward.h +// Parsed from ATen/ops/addr.h // #pragma once @@ -33499,46 +18975,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
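The addmv group makes the naming convention of these bindings easy to see: the _out form takes the destination tensor first and keeps the trailing defaults optional, the _outf form mirrors the C++ "final out" overload with every argument explicit and the destination last, and a trailing underscore marks the in-place variant that mutates self. A sketch, with the same assumed factories as the earlier examples:

    Tensor A = rand(3, 3);
    Tensor x = rand(3);
    Tensor y = ones(3);
    Tensor out = ones(3);

    addmv_out(out, y, A, x);                                    // destination first, beta/alpha default to 1
    addmv_outf(y, A, x, new Scalar(1.0), new Scalar(1.0), out); // all arguments explicit, out last
    addmv_(y, A, x);                                            // in-place: y becomes y + A @ x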
-@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); - - -// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); - - -// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); - - -// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact1d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); - - -// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? 
scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); +// #include -// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact1d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); +// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor addr(@Const @ByRef Tensor self, @Const @ByRef Tensor vec1, @Const @ByRef Tensor vec2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor addr(@Const @ByRef Tensor self, @Const @ByRef Tensor vec1, @Const @ByRef Tensor vec2); +// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor vec1, @Const @ByRef Tensor vec2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor addr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor vec1, @Const @ByRef Tensor vec2); +// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor addr_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor vec1, @Const @ByRef Tensor vec2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// Parsed from ATen/ops/_upsample_nearest_exact2d.h +// Parsed from ATen/ops/adjoint.h // #pragma once @@ -33559,55 +19012,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? 
scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); - - -// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size); - - -// aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); - +// #include -// aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size); +// aten::adjoint(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor adjoint(@Const @ByRef Tensor self); -// Parsed from ATen/ops/_upsample_nearest_exact2d_backward.h +// Parsed from ATen/ops/affine_grid_generator.h // #pragma once @@ -33628,46 +19042,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); - - -// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); - - -// aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); +// #include -// aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); +// aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor +@Namespace("at") public static native @ByVal Tensor affine_grid_generator(@Const @ByRef Tensor theta, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor affine_grid_generator(@Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); +// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); +// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor affine_grid_generator_outf(@Const @ByRef Tensor theta, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor affine_grid_generator_outf(@Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners, @ByRef Tensor out); -// Parsed from ATen/ops/_upsample_nearest_exact3d.h +// Parsed from ATen/ops/affine_grid_generator_backward.h // #pragma once @@ -33688,55 +19080,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); - - -// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size); - - -// aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); - +// #include -// aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size); +// aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor +@Namespace("at") public static native @ByVal Tensor affine_grid_generator_backward(@Const @ByRef Tensor grad, @ByVal LongArrayRef size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor affine_grid_generator_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); -// Parsed from ATen/ops/_upsample_nearest_exact3d_backward.h +// Parsed from ATen/ops/alias.h // #pragma once @@ -33757,46 +19111,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); - - -// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
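affine_grid_generator consumes a batch of affine matrices theta (N x 2 x 3 in the 2-D case) plus the target geometry N, C, H, W, and produces the sampling grid used by grid sampling; note that the int[] size parameter of the schema arrives as a plain long[] on the Java side. A sketch, with the rand() factory again assumed:

    Tensor theta = rand(1, 2, 3);  // one 2x3 affine matrix for a batch of one
    Tensor grid = affine_grid_generator(theta, new long[]{1, 1, 4, 4}, /*align_corners=*/false);
    // grid has shape (1, 4, 4, 2): an (x, y) sampling location per output pixel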
-@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); - - -// aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _upsample_nearest_exact3d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); - +// #include -// aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor _upsample_nearest_exact3d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); +// aten::alias(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor alias(@Const @ByRef Tensor self); -// Parsed from ATen/ops/_use_cudnn_ctc_loss.h +// Parsed from ATen/ops/alias_copy.h // #pragma once @@ -33817,20 +19141,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool -@Namespace("at") public static native @Cast("bool") boolean _use_cudnn_ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Cast("int64_t") long blank); -@Namespace("at") public static native @Cast("bool") boolean _use_cudnn_ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Cast("int64_t") long blank); +// aten::alias_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal 
Tensor alias_copy(@Const @ByRef Tensor self); -// aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool -@Namespace("at") public static native @Cast("bool") boolean _use_cudnn_ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths, @Cast("int64_t") long blank); +// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor alias_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor alias_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/_validate_compressed_sparse_indices.h +// Parsed from ATen/ops/align_as.h // #pragma once @@ -33851,16 +19176,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> () -@Namespace("at") public static native void _validate_compressed_sparse_indices(@Cast("bool") boolean is_crow, @Const @ByRef Tensor compressed_idx, @Const @ByRef Tensor plain_idx, @Cast("int64_t") long cdim, @Cast("int64_t") long dim, @Cast("int64_t") long nnz); -// Parsed from ATen/ops/_validate_sparse_bsc_tensor_args.h +// Parsed from ATen/ops/align_tensors.h // #pragma once @@ -33881,17 +19204,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> () -@Namespace("at") public static native void _validate_sparse_bsc_tensor_args(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native void _validate_sparse_bsc_tensor_args(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::align_tensors(Tensor[] tensors) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector align_tensors(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// Parsed from ATen/ops/_validate_sparse_bsr_tensor_args.h +// Parsed from ATen/ops/align_to.h // #pragma once @@ -33912,17 +19234,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> () -@Namespace("at") public static native void _validate_sparse_bsr_tensor_args(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native void _validate_sparse_bsr_tensor_args(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -// Parsed from ATen/ops/_validate_sparse_compressed_tensor_args.h +// Parsed from ATen/ops/all.h // #pragma once @@ -33943,17 +19262,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @Cast("int64_t") long dim); + +// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor all_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); + +// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @ByVal Dimname dim); + +// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor all_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::all(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self); -// aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> () -@Namespace("at") public static native void _validate_sparse_compressed_tensor_args(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal Layout layout); -@Namespace("at") public static native void _validate_sparse_compressed_tensor_args(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal Layout layout); +// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor all_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/_validate_sparse_coo_tensor_args.h +// Parsed from ATen/ops/allclose.h // #pragma once @@ -33974,17 +19317,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> () -@Namespace("at") public static native void _validate_sparse_coo_tensor_args(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native void _validate_sparse_coo_tensor_args(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool +@Namespace("at") public static native @Cast("bool") boolean allclose(@Const @ByRef Tensor self, @Const @ByRef Tensor other, double rtol/*=1e-05*/, double atol/*=1e-08*/, @Cast("bool") boolean equal_nan/*=false*/); +@Namespace("at") public static native @Cast("bool") boolean allclose(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/_validate_sparse_csc_tensor_args.h +// Parsed from ATen/ops/alpha_dropout.h // #pragma once @@ -34005,17 +19348,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor +@Namespace("at") public static native @ByVal Tensor alpha_dropout(@Const @ByRef Tensor input, double p, @Cast("bool") boolean train); -// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> () -@Namespace("at") public static native void _validate_sparse_csc_tensor_args(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native void _validate_sparse_csc_tensor_args(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor alpha_dropout_(@ByRef Tensor self, double p, @Cast("bool") boolean train); -// Parsed from ATen/ops/_validate_sparse_csr_tensor_args.h +// Parsed from ATen/ops/amax.h // #pragma once @@ -34036,17 +19381,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor amax(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor amax(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor amax(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -// aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> () -@Namespace("at") public static native void _validate_sparse_csr_tensor_args(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native void _validate_sparse_csr_tensor_args(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor amax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor amax_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor amax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor amax_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor amax_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// Parsed from ATen/ops/_values.h +// Parsed from ATen/ops/amin.h // #pragma once @@ -34067,14 +19421,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor amin(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor amin(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor amin(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
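// A minimal usage sketch for amin/amax, under the same assumption that the
// statics live on org.bytedeco.pytorch.global.torch; the long[] overloads
// stand in for at::IntArrayRef, and omitting dim reduces over all dimensions:
//
//   Tensor mn = torch.amin(t);                                  // all dims
//   Tensor mx = torch.amax(t, new long[]{0}, /*keepdim=*/true); // dim 0, kept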
+@Namespace("at") public static native @ByRef Tensor amin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor amin_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor amin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor amin_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor amin_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// Parsed from ATen/ops/_values_copy.h +// Parsed from ATen/ops/aminmax.h // #pragma once @@ -34095,21 +19461,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::_values_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor _values_copy(@Const @ByRef Tensor self); +// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max) +@Namespace("at") public static native @ByVal T_TensorTensor_T aminmax(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T aminmax(@Const @ByRef Tensor self); -// aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _values_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor _values_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max) +@Namespace("at") public static native @ByVal T_TensorTensor_T aminmax_out(@ByRef Tensor min, @ByRef Tensor max, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T aminmax_out(@ByRef Tensor min, @ByRef Tensor max, @Const @ByRef Tensor self); +// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) 
max) +@Namespace("at") public static native @ByVal T_TensorTensor_T aminmax_outf(@Const @ByRef Tensor self, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByRef Tensor min, @ByRef Tensor max); -// Parsed from ATen/ops/_version.h +// Parsed from ATen/ops/and.h // #pragma once @@ -34130,14 +19498,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor __and__(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor __and__(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/_weight_norm.h +// Parsed from ATen/ops/angle.h // #pragma once @@ -34158,17 +19531,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::angle(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor angle(@Const @ByRef Tensor self); -// aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor _weight_norm(@Const @ByRef Tensor v, @Const @ByRef Tensor g, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal Tensor _weight_norm(@Const @ByRef Tensor v, @Const @ByRef Tensor g); +// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor angle_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor angle_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/_weight_norm_differentiable_backward.h +// Parsed from ATen/ops/any.h // #pragma once @@ -34189,16 +19566,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @Cast("int64_t") long dim); + +// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor any_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); + +// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @ByVal Dimname dim); + +// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
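// A sketch of consuming the tuple-returning binding above: this refactor maps
// std::tuple returns to dedicated wrappers such as T_TensorTensor_T instead of
// the old TensorTensorTuple / raw PointerPointer casts; assuming the wrapper
// exposes get0()/get1() accessors:
//
//   T_TensorTensor_T mm = torch.aminmax(t);
//   Tensor min = mm.get0(), max = mm.get1();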
+@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor any_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::any(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self); -// aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _weight_norm_differentiable_backward(@Const @ByRef Tensor grad_w, @Const @ByRef Tensor saved_v, @Const @ByRef Tensor saved_g, @Const @ByRef Tensor saved_norms, @Cast("int64_t") long dim); +// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor any_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/_weight_norm_interface.h +// Parsed from ATen/ops/arange.h // #pragma once @@ -34219,23 +19621,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar end); +// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar end, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end); +// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step); +// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _weight_norm_interface(@Const @ByRef Tensor v, @Const @ByRef Tensor g, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal TensorTensorTuple _weight_norm_interface(@Const @ByRef Tensor v, @Const @ByRef Tensor g); +// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arange_out(@ByRef Tensor out, @Const @ByRef Scalar end); +// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arange_outf(@Const @ByRef Scalar end, @ByRef Tensor out); -// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _weight_norm_interface_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor v, @Const @ByRef Tensor g, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _weight_norm_interface_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor v, @Const @ByRef Tensor g); -// aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _weight_norm_interface_outf(@Const @ByRef Tensor v, @Const @ByRef Tensor g, @Cast("int64_t") long dim, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arange_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step); +// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) 
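// A minimal usage sketch for the arange factories above, assuming
// org.bytedeco.pytorch.global.torch and a numeric Scalar constructor:
//
//   Tensor r1 = torch.arange(new Scalar(10));                // aten::arange(end)
//   Tensor r2 = torch.arange(new Scalar(2), new Scalar(10)); // aten::arange.start
//
// The TensorOptions parameter defaults to at::TensorOptions{}; dtype, layout,
// device, and pin_memory can instead be passed via the expanded optional form.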
+@Namespace("at") public static native @ByRef Tensor arange_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByRef Tensor out); -// Parsed from ATen/ops/_weight_norm_interface_backward.h +// Parsed from ATen/ops/arccos.h // #pragma once @@ -34256,21 +19676,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::arccos(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor arccos(@Const @ByRef Tensor self); -// aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple _weight_norm_interface_backward(@Const @ByRef Tensor grad_w, @Const @ByRef Tensor saved_v, @Const @ByRef Tensor saved_g, @Const @ByRef Tensor saved_norms, @Cast("int64_t") long dim); +// aten::arccos_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arccos_(@ByRef Tensor self); -// aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _weight_norm_interface_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor grad_w, @Const @ByRef Tensor saved_v, @Const @ByRef Tensor saved_g, @Const @ByRef Tensor saved_norms, @Cast("int64_t") long dim); -// aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer _weight_norm_interface_backward_outf(@Const @ByRef Tensor grad_w, @Const @ByRef Tensor saved_v, @Const @ByRef Tensor saved_g, @Const @ByRef Tensor saved_norms, @Cast("int64_t") long dim, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arccos_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arccos_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/abs.h +// Parsed from ATen/ops/arccosh.h // #pragma once @@ -34291,24 +19714,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::abs(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor abs(@Const @ByRef Tensor self); +// aten::arccosh(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor arccosh(@Const @ByRef Tensor self); -// aten::abs_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor abs_(@ByRef Tensor self); +// aten::arccosh_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arccosh_(@ByRef Tensor self); -// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor abs_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor abs_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor arccosh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arccosh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/absolute.h +// Parsed from ATen/ops/arcsin.h // #pragma once @@ -34329,21 +19752,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::absolute(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor absolute(@Const @ByRef Tensor self); +// aten::arcsin(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor arcsin(@Const @ByRef Tensor self); -// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor absolute_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor absolute_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::arcsin_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arcsin_(@ByRef Tensor self); + +// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arcsin_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arcsin_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/acos.h +// Parsed from ATen/ops/arcsinh.h // #pragma once @@ -34364,24 +19790,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::acos(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor acos(@Const @ByRef Tensor self); +// aten::arcsinh(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor arcsinh(@Const @ByRef Tensor self); -// aten::acos_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor acos_(@ByRef Tensor self); +// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arcsinh_(@ByRef Tensor self); -// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor acos_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor acos_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arcsinh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arcsinh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/acosh.h +// Parsed from ATen/ops/arctan.h // #pragma once @@ -34402,24 +19828,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::acosh(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor acosh(@Const @ByRef Tensor self); +// aten::arctan(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor arctan(@Const @ByRef Tensor self); -// aten::acosh_(Tensor(a!) self) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor acosh_(@ByRef Tensor self); +// aten::arctan_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arctan_(@ByRef Tensor self); -// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor acosh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor acosh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arctan_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arctan_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/adaptive_avg_pool1d.h +// Parsed from ATen/ops/arctan2.h // #pragma once @@ -34440,17 +19866,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::arctan2(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor arctan2(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arctan2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arctan2_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/adaptive_avg_pool2d.h +// Parsed from ATen/ops/arctanh.h // #pragma once @@ -34471,40 +19901,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); - - -// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); - - -// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size); - - -// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByRef Tensor out); - +// #include -// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::arctanh(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor arctanh(@Const @ByRef Tensor self); -// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size); +// aten::arctanh_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arctanh_(@ByRef Tensor self); +// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arctanh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor arctanh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/adaptive_avg_pool3d.h +// Parsed from ATen/ops/argmax.h // #pragma once @@ -34525,40 +19939,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); - - -// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); - - -// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size); - - -// aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByRef Tensor out); - - -// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// #include -// aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor -@Namespace("at") public static native @ByVal Tensor adaptive_avg_pool3d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size); +// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor argmax(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor argmax(@Const @ByRef Tensor self); +// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor argmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor argmax_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor argmax_outf(@Const @ByRef Tensor self, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// Parsed from ATen/ops/adaptive_avg_pool3d_backward.h +// Parsed from ATen/ops/argmin.h // #pragma once @@ -34579,18 +19976,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_avg_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor grad_input); +// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor argmin(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor argmin(@Const @ByRef Tensor self); + +// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor argmin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor argmin_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor argmin_outf(@Const @ByRef Tensor self, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// Parsed from ATen/ops/adaptive_max_pool1d.h +// Parsed from ATen/ops/argsort.h // #pragma once @@ -34611,17 +20013,31 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple adaptive_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal TensorTensorTuple adaptive_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self); +// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @Cast("bool") boolean stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @Cast("bool") boolean stable); +// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor argsort_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByRef Tensor argsort_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean stable); +// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor argsort_outf(@Const @ByRef Tensor self, @Cast("bool") boolean stable, @Cast("int64_t") long dim, @Cast("bool") boolean descending, @ByRef Tensor out); -// Parsed from ATen/ops/adaptive_max_pool2d.h + + + +// Parsed from ATen/ops/argwhere.h // #pragma once @@ -34642,24 +20058,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer adaptive_max_pool2d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer adaptive_max_pool2d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); -// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer adaptive_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByRef Tensor out, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer adaptive_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out, @ByRef Tensor indices); -// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple adaptive_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal TensorTensorTuple adaptive_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::argwhere(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor argwhere(@Const @ByRef Tensor self); -// Parsed from ATen/ops/adaptive_max_pool2d_backward.h +// Parsed from ATen/ops/as_strided.h // #pragma once @@ -34680,21 +20088,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_max_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices); -// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); -// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor -@Namespace("at") public static native @ByVal Tensor adaptive_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices); + +// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor as_strided_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); +// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); +@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -// Parsed from ATen/ops/adaptive_max_pool3d.h +// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor as_strided__symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); +@Namespace("at") public static native @Const @ByRef Tensor as_strided__symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); + + + + + +// Parsed from ATen/ops/as_strided_copy.h // #pragma once @@ -34715,59 +20139,46 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer adaptive_max_pool3d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer adaptive_max_pool3d_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer adaptive_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByRef Tensor out, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer adaptive_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out, @ByRef Tensor indices); - -// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple adaptive_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal TensorTensorTuple adaptive_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// #include +// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -// Parsed from ATen/ops/adaptive_max_pool3d_backward.h +// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor as_strided_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor as_strided_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor as_strided_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); -// #include +// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor as_strided_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); +@Namespace("at") public static native @ByRef Tensor as_strided_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); -// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_max_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices); -// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor adaptive_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) 
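// A minimal usage sketch for as_strided_copy, same entry-point assumption;
// the long[] / long... overloads mirror the SymInt[] schema with plain sizes,
// and the result is a copy rather than a view of t:
//
//   Tensor c = torch.as_strided_copy(t, new long[]{2, 2}, new long[]{1, 2});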
+@Namespace("at") public static native @ByRef Tensor as_strided_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal SymIntOptional storage_offset, @ByRef Tensor out); -// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor -@Namespace("at") public static native @ByVal Tensor adaptive_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor indices); -// Parsed from ATen/ops/add.h +// Parsed from ATen/ops/as_strided_scatter.h // #pragma once @@ -34788,31 +20199,46 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor add(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor add_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor add(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? 
storage_offset=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor as_strided_scatter_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); +@Namespace("at") public static native @ByVal Tensor as_strided_scatter_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); -// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor add_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef Scalar alpha, @ByRef Tensor out); + +// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); -// Parsed from ATen/ops/addbmm.h +// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor as_strided_scatter_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); + + +// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor as_strided_scatter_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal SymIntOptional storage_offset, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/asin.h // #pragma once @@ -34833,23 +20259,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor addbmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor addbmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2); -// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor addbmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::asin(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor asin(@Const @ByRef Tensor self); -// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor addbmm(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor addbmm(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2); +// aten::asin_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor asin_(@ByRef Tensor self); + +// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor asin_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor asin_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/addcdiv.h +// Parsed from ATen/ops/asinh.h // #pragma once @@ -34870,23 +20297,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor addcdiv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); -@Namespace("at") public static native @ByRef Tensor addcdiv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2); -// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor addcdiv_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef Scalar value, @ByRef Tensor out); +// aten::asinh(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor asinh(@Const @ByRef Tensor self); -// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor addcdiv(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); -@Namespace("at") public static native @ByVal Tensor addcdiv(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2); +// aten::asinh_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor asinh_(@ByRef Tensor self); + +// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor asinh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor asinh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/addcmul.h +// Parsed from ATen/ops/atan.h // #pragma once @@ -34907,23 +20335,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor addcmul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); -@Namespace("at") public static native @ByRef Tensor addcmul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2); -// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor addcmul_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef Scalar value, @ByRef Tensor out); +// aten::atan(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor atan(@Const @ByRef Tensor self); -// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor addcmul(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar value); -@Namespace("at") public static native @ByVal Tensor addcmul(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor1, @Const @ByRef Tensor tensor2); +// aten::atan_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor atan_(@ByRef Tensor self); + +// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor atan_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor atan_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/addmm.h +// Parsed from ATen/ops/atan2.h // #pragma once @@ -34944,23 +20373,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor addmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor addmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor addmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor atan2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor atan2_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor addmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor addmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); +// aten::atan2(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor atan2(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/addmv.h +// Parsed from ATen/ops/atanh.h // #pragma once @@ -34981,27 +20408,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor addmv(@Const @ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor addmv(@Const @ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec); +// aten::atanh(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor atanh(@Const @ByRef Tensor self); -// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor addmv_(@ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor addmv_(@ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec); +// aten::atanh_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor atanh_(@ByRef Tensor self); -// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor addmv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor addmv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec); -// aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor addmv_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat, @Const @ByRef Tensor vec, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor atanh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor atanh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/addr.h +// Parsed from ATen/ops/atleast_1d.h // #pragma once @@ -35022,23 +20446,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor addr(@Const @ByRef Tensor self, @Const @ByRef Tensor vec1, @Const @ByRef Tensor vec2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor addr(@Const @ByRef Tensor self, @Const @ByRef Tensor vec1, @Const @ByRef Tensor vec2); +// aten::atleast_1d(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor atleast_1d(@Const @ByRef Tensor self); -// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor addr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor vec1, @Const @ByRef Tensor vec2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor addr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor vec1, @Const @ByRef Tensor vec2); -// aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor addr_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor vec1, @Const @ByRef Tensor vec2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_1d(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// Parsed from ATen/ops/adjoint.h +// Parsed from ATen/ops/atleast_2d.h // #pragma once @@ -35059,16 +20479,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::adjoint(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor adjoint(@Const @ByRef Tensor self); +// aten::atleast_2d(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor atleast_2d(@Const @ByRef Tensor self); + +// aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_2d(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// Parsed from ATen/ops/affine_grid_generator.h +// Parsed from ATen/ops/atleast_3d.h // #pragma once @@ -35089,24 +20512,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor -@Namespace("at") public static native @ByVal Tensor affine_grid_generator(@Const @ByRef Tensor theta, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor affine_grid_generator(@Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); +// aten::atleast_3d(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor atleast_3d(@Const @ByRef Tensor self); -// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); -// aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor affine_grid_generator_outf(@Const @ByRef Tensor theta, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("bool") boolean align_corners, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor affine_grid_generator_outf(@Const @ByRef Tensor theta, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners, @ByRef Tensor out); +// aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_3d(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// Parsed from ATen/ops/affine_grid_generator_backward.h +// Parsed from ATen/ops/avg_pool1d.h // #pragma once @@ -35127,17 +20545,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor -@Namespace("at") public static native @ByVal Tensor affine_grid_generator_backward(@Const @ByRef Tensor grad, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor affine_grid_generator_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean align_corners); +// aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor +@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/); +@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/); +@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// Parsed from ATen/ops/alias.h +// Parsed from ATen/ops/avg_pool2d.h // #pragma once @@ -35158,16 +20578,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); +@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); +@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); -// aten::alias(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor alias(@Const @ByRef Tensor self); +// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
+@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional divisor_override);
+@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size);
+@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional divisor_override);
+@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);

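// --- Usage sketch (illustration only) for the avg_pool2d overloads above. Assumes the
// randn() factory overloads declared elsewhere in this file; that LongOptional's
// no-argument constructor denotes c10::nullopt is also an assumption of this sketch.
//
//   Tensor x = randn(1, 3, 32, 32);    // hypothetical NCHW input
//   Tensor y = avg_pool2d(x, 2, 2);    // kernel_size only; stride defaults to kernel_size
//   Tensor z = avg_pool2d(x, new long[]{3, 3}, new long[]{1, 1}, new long[]{1, 1},
//                         /*ceil_mode=*/false, /*count_include_pad=*/true,
//                         new LongOptional());   // divisor_override = nullopt (assumed)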
+@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); -// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor alias_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor alias_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor +@Namespace("at") public static native @ByVal Tensor avg_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); +@Namespace("at") public static native @ByVal Tensor avg_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -// Parsed from ATen/ops/align_as.h +// Parsed from ATen/ops/avg_pool3d.h // #pragma once @@ -35223,14 +20658,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); +@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); +@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); + +// aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
+@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional divisor_override);
+@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size);
+@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional divisor_override);
+@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);

-// Parsed from ATen/ops/align_tensors.h
+// Parsed from ATen/ops/avg_pool3d_backward.h

// #pragma once

@@ -35251,16 +20700,24 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/align_tensors_ops.h>
+// #include <ATen/ops/avg_pool3d_backward_ops.h>

-// aten::align_tensors(Tensor[] tensors) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector align_tensors(@ByVal TensorArrayRef tensors);
+// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override);
+@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override);
+// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); +// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor +@Namespace("at") public static native @ByVal Tensor avg_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); +@Namespace("at") public static native @ByVal Tensor avg_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -// Parsed from ATen/ops/align_to.h + +// Parsed from ATen/ops/baddbmm.h // #pragma once @@ -35281,14 +20738,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor baddbmm(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor baddbmm(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2); +// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor baddbmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor baddbmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2); +// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor baddbmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// Parsed from ATen/ops/all.h + +// Parsed from ATen/ops/bartlett_window.h // #pragma once @@ -35309,41 +20775,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// #include -// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor all_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length); +// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor all_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
+@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
+@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic);
+// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);

-// aten::all(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor all(@Const @ByRef Tensor self);
+// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor bartlett_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length);
+// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor bartlett_window_outf(@Cast("int64_t") long window_length, @ByRef Tensor out);

-// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor all_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor all_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
+// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor bartlett_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic);
+// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor bartlett_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByRef Tensor out);

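// --- Usage sketch (illustration only): bartlett_window exposes both C++ entry points,
// the packed TensorOptions form and the unpacked dtype/layout/device/pin_memory form.
// The TensorOptions(ScalarType) constructor used below is assumed to be available from
// these presets, mirroring the implicit C++ conversion.
//
//   Tensor w1 = bartlett_window(128);                         // default options
//   Tensor w2 = bartlett_window(128, /*periodic=*/false);
//   Tensor w3 = bartlett_window(128, new TensorOptions(ScalarType.Double));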
-// Parsed from ATen/ops/allclose.h
+// Parsed from ATen/ops/batch_norm.h

// #pragma once

@@ -35364,17 +20824,16 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/allclose_ops.h>
+// #include <ATen/ops/batch_norm_ops.h>

-// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
-@Namespace("at") public static native @Cast("bool") boolean allclose(@Const @ByRef Tensor self, @Const @ByRef Tensor other, double rtol/*=1e-05*/, double atol/*=1e-08*/, @Cast("bool") boolean equal_nan/*=false*/);
-@Namespace("at") public static native @Cast("bool") boolean allclose(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
+// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
+@Namespace("at") public static native @ByVal Tensor batch_norm(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double momentum, double eps, @Cast("bool") boolean cudnn_enabled);

-// Parsed from ATen/ops/alpha_dropout.h
+// Parsed from ATen/ops/batch_norm_backward_elemt.h

// #pragma once

@@ -35395,19 +20854,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/alpha_dropout_ops.h>
+// #include <ATen/ops/batch_norm_backward_elemt_ops.h>

-// aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor
-@Namespace("at") public static native @ByVal Tensor alpha_dropout(@Const @ByRef Tensor input, double p, @Cast("bool") boolean train);
+// aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor
+@Namespace("at") public static native @ByVal Tensor batch_norm_backward_elemt(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor mean_dy, @Const @ByRef Tensor mean_dy_xmu, @Const @ByRef Tensor count);

-// aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor alpha_dropout_(@ByRef Tensor self, double p, @Cast("bool") boolean train);
+// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor batch_norm_backward_elemt_out(@ByRef Tensor out, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor mean_dy, @Const @ByRef Tensor mean_dy_xmu, @Const @ByRef Tensor count);
+// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor batch_norm_backward_elemt_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor mean_dy, @Const @ByRef Tensor mean_dy_xmu, @Const @ByRef Tensor count, @ByRef Tensor out);

-// Parsed from ATen/ops/amax.h
+// Parsed from ATen/ops/batch_norm_backward_reduce.h

// #pragma once

@@ -35428,26 +20889,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/amax_ops.h>
+// #include <ATen/ops/batch_norm_backward_reduce_ops.h>

-// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
-@Namespace("at") public static native @ByVal Tensor amax(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/);
-@Namespace("at") public static native @ByVal Tensor amax(@Const @ByRef Tensor self);
-@Namespace("at") public static native @ByVal Tensor amax(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/);
+// aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T batch_norm_backward_reduce(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Cast("bool") boolean input_g, @Cast("bool") boolean weight_g, @Cast("bool") boolean bias_g);

-// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor amax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/);
-@Namespace("at") public static native @ByRef Tensor amax_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-@Namespace("at") public static native @ByRef Tensor amax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/);
-// aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor amax_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor amax_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out);
+// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T batch_norm_backward_reduce_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Cast("bool") boolean input_g, @Cast("bool") boolean weight_g, @Cast("bool") boolean bias_g);
+// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T batch_norm_backward_reduce_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Cast("bool") boolean input_g, @Cast("bool") boolean weight_g, @Cast("bool") boolean bias_g, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3);

-// Parsed from ATen/ops/amin.h
+// Parsed from ATen/ops/batch_norm_elemt.h

// #pragma once

@@ -35468,26 +20924,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/amin_ops.h>
+// #include <ATen/ops/batch_norm_elemt_ops.h>

-// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
-@Namespace("at") public static native @ByVal Tensor amin(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/);
-@Namespace("at") public static native @ByVal Tensor amin(@Const @ByRef Tensor self);
-@Namespace("at") public static native @ByVal Tensor amin(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/);
+// aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
+@Namespace("at") public static native @ByVal Tensor batch_norm_elemt(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, double eps);

-// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor amin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/);
-@Namespace("at") public static native @ByRef Tensor amin_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-@Namespace("at") public static native @ByRef Tensor amin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/);
-// aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor amin_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor amin_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out);
+// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor batch_norm_elemt_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, double eps);
+// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor batch_norm_elemt_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, double eps, @ByRef Tensor out); -// Parsed from ATen/ops/aminmax.h +// Parsed from ATen/ops/batch_norm_gather_stats.h // #pragma once @@ -35508,23 +20959,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max) -@Namespace("at") public static native @ByVal TensorTensorTuple aminmax(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple aminmax(@Const @ByRef Tensor self); +// aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_gather_stats(@Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Cast("int64_t") long count); -// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer aminmax_out(@ByRef Tensor min, @ByRef Tensor max, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer aminmax_out(@ByRef Tensor min, @ByRef Tensor max, @Const @ByRef Tensor self); -// aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer aminmax_outf(@Const @ByRef Tensor self, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByRef Tensor min, @ByRef Tensor max); +// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_gather_stats_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Cast("int64_t") long count); +// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) 
+@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_gather_stats_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Cast("int64_t") long count, @ByRef Tensor out0, @ByRef Tensor out1);

-// Parsed from ATen/ops/and.h
+// Parsed from ATen/ops/batch_norm_gather_stats_with_counts.h

// #pragma once

@@ -35545,19 +20994,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/and_ops.h>
+// #include <ATen/ops/batch_norm_gather_stats_with_counts_ops.h>

-// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
-@Namespace("at") public static native @ByVal Tensor __and__(@Const @ByRef Tensor self, @Const @ByRef Scalar other);
+// aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_gather_stats_with_counts(@Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Const @ByRef Tensor counts);

-// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
-@Namespace("at") public static native @ByVal Tensor __and__(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
+// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_gather_stats_with_counts_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Const @ByRef Tensor counts);
+// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_gather_stats_with_counts_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Const @ByRef Tensor counts, @ByRef Tensor out0, @ByRef Tensor out1);

-// Parsed from ATen/ops/angle.h
+// Parsed from ATen/ops/batch_norm_stats.h

// #pragma once

@@ -35578,21 +21029,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/angle_ops.h>
+// #include <ATen/ops/batch_norm_stats_ops.h>

-// aten::angle(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor angle(@Const @ByRef Tensor self);
+// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_stats(@Const @ByRef Tensor input, double eps);

-// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor angle_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor angle_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_stats_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, double eps); +// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_stats_outf(@Const @ByRef Tensor input, double eps, @ByRef Tensor out0, @ByRef Tensor out1); -// Parsed from ATen/ops/any.h +// Parsed from ATen/ops/batch_norm_update_stats.h // #pragma once @@ -35613,41 +21064,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @Cast("int64_t") long dim); - -// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor any_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); - -// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self, @ByVal Dimname dim); +// #include -// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor any_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::any(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor any(@Const @ByRef Tensor self); +// aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_update_stats(@Const @ByRef Tensor input, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum); -// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor any_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor any_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_update_stats_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum); +// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T batch_norm_update_stats_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, @ByRef Tensor out0, @ByRef Tensor out1); -// Parsed from ATen/ops/arange.h +// Parsed from ATen/ops/bernoulli.h // #pragma once @@ -35668,41 +21099,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar end); -// aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar end, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self); -// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end); -// aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor bernoulli_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bernoulli_outf(@Const @ByRef Tensor self, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step); -// aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self, double p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self, double p); -// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arange_out(@ByRef Tensor out, @Const @ByRef Scalar end); -// aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arange_outf(@Const @ByRef Scalar end, @ByRef Tensor out); +// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bernoulli_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bernoulli_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor p, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arange_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step); -// aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arange_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByRef Tensor out); +// aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? 
generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self, @Const @ByRef Tensor p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self, @Const @ByRef Tensor p); + +// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bernoulli_out(@ByRef Tensor out, @Const @ByRef Tensor self, double p/*=0.5*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bernoulli_outf(@Const @ByRef Tensor self, double p, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// Parsed from ATen/ops/arccos.h +// Parsed from ATen/ops/bilinear.h // #pragma once @@ -35723,24 +21153,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::arccos(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor arccos(@Const @ByRef Tensor self); +// #include -// aten::arccos_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arccos_(@ByRef Tensor self); -// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arccos_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arccos_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor bilinear(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias); +@Namespace("at") public static native @ByVal Tensor bilinear(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor weight); -// Parsed from ATen/ops/arccosh.h +// Parsed from ATen/ops/binary_cross_entropy.h // #pragma once @@ -35761,24 +21184,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::arccosh(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor arccosh(@Const @ByRef Tensor self); -// aten::arccosh_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arccosh_(@ByRef Tensor self); +// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor binary_cross_entropy(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor binary_cross_entropy(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arccosh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
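The bernoulli bindings above show how optional generators surface in Java: the short overloads drop the GeneratorOptional argument and rely on the c10::nullopt default baked into the annotation. A small sketch using only signatures shown above (rand is assumed from the same generated class):

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class BernoulliSketch {
    public static void main(String[] args) {
        Tensor probs = rand(4);               // per-element success probabilities
        Tensor draws = bernoulli(probs);      // aten::bernoulli, generator=None
        Tensor fair = bernoulli(probs, 0.5);  // aten::bernoulli.p with a fixed probability
    }
}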
-@Namespace("at") public static native @ByRef Tensor arccosh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByRef Tensor out); -// Parsed from ATen/ops/arcsin.h +// Parsed from ATen/ops/binary_cross_entropy_backward.h // #pragma once @@ -35799,24 +21221,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::arcsin(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor arcsin(@Const @ByRef Tensor self); -// aten::arcsin_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arcsin_(@ByRef Tensor self); +// aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor binary_cross_entropy_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor binary_cross_entropy_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arcsin_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arcsin_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByRef Tensor grad_input); -// Parsed from ATen/ops/arcsinh.h +// Parsed from ATen/ops/binary_cross_entropy_with_logits.h // #pragma once @@ -35837,24 +21258,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::arcsinh(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor arcsinh(@Const @ByRef Tensor self); -// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arcsinh_(@ByRef Tensor self); +// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor binary_cross_entropy_with_logits(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional pos_weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor binary_cross_entropy_with_logits(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arcsinh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arcsinh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_with_logits_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional pos_weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_with_logits_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_with_logits_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional pos_weight, @Cast("int64_t") long reduction, @ByRef Tensor out); -// Parsed from ATen/ops/arctan.h +// Parsed from ATen/ops/bincount.h // #pragma once @@ -35875,24 +21295,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::arctan(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor arctan(@Const @ByRef Tensor self); -// aten::arctan_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arctan_(@ByRef Tensor self); +// aten::bincount(Tensor self, Tensor? 
weights=None, int minlength=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor bincount(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weights, @Cast("int64_t") long minlength/*=0*/); +@Namespace("at") public static native @ByVal Tensor bincount(@Const @ByRef Tensor self); -// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arctan_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arctan_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bincount_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weights, @Cast("int64_t") long minlength/*=0*/); +@Namespace("at") public static native @ByRef Tensor bincount_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bincount_outf(@Const @ByRef Tensor self, @Const @ByRef TensorOptional weights, @Cast("int64_t") long minlength, @ByRef Tensor out); -// Parsed from ATen/ops/arctan2.h +// Parsed from ATen/ops/binomial.h // #pragma once @@ -35913,21 +21332,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::arctan2(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor arctan2(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor binomial(@Const @ByRef Tensor count, @Const @ByRef Tensor prob, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor binomial(@Const @ByRef Tensor count, @Const @ByRef Tensor prob); -// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arctan2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arctan2_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor binomial_out(@ByRef Tensor out, @Const @ByRef Tensor count, @Const @ByRef Tensor prob, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor binomial_out(@ByRef Tensor out, @Const @ByRef Tensor count, @Const @ByRef Tensor prob); +// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor binomial_outf(@Const @ByRef Tensor count, @Const @ByRef Tensor prob, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// Parsed from ATen/ops/arctanh.h +// Parsed from ATen/ops/bitwise_and.h // #pragma once @@ -35948,24 +21369,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::arctanh(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor arctanh(@Const @ByRef Tensor self); +// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_and_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_and_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::arctanh_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arctanh_(@ByRef Tensor self); +// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_and_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_and_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arctanh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor arctanh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_and(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_and(@Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_and(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_and_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_and_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/argmax.h + + + +// Parsed from ATen/ops/bitwise_left_shift.h // #pragma once @@ -35986,23 +21420,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::argmax(Tensor self, int? 
dim=None, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor argmax(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor argmax(@Const @ByRef Tensor self); +// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_left_shift(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor argmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor argmax_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor argmax_outf(@Const @ByRef Tensor self, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_left_shift(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); + +// aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_left_shift(@Const @ByRef Scalar self, @Const @ByRef Tensor other); + +// aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/argmin.h + +// Parsed from ATen/ops/bitwise_not.h // #pragma once @@ -36023,23 +21471,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::argmin(Tensor self, int? 
dim=None, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor argmin(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor argmin(@Const @ByRef Tensor self); +// aten::bitwise_not(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_not(@Const @ByRef Tensor self); -// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor argmin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor argmin_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor argmin_outf(@Const @ByRef Tensor self, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_not_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_not_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/argsort.h +// Parsed from ATen/ops/bitwise_or.h // #pragma once @@ -36060,31 +21506,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self); +// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_or_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_or_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @Cast("bool") boolean stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @Cast("bool") boolean stable); +// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_or_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor bitwise_or_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_or(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_or(@Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor argsort_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByRef Tensor argsort_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean stable); -// aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor argsort_outf(@Const @ByRef Tensor self, @Cast("bool") boolean stable, @Cast("int64_t") long dim, @Cast("bool") boolean descending, @ByRef Tensor out); +// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_or(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_or_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_or_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/argwhere.h +// Parsed from ATen/ops/bitwise_right_shift.h // #pragma once @@ -36105,16 +21557,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::argwhere(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor argwhere(@Const @ByRef Tensor self); +// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_right_shift(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_right_shift(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); + +// aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_right_shift(@Const @ByRef Scalar self, @Const @ByRef Tensor other); + +// aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/as_strided.h + + +// Parsed from ATen/ops/bitwise_xor.h // #pragma once @@ -36135,37 +21608,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// #include -// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor as_strided_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride); +// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_xor_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_xor_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_xor_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_xor_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @Const @ByRef Tensor as_strided_(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_xor(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_xor(@Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor as_strided__symint(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); -@Namespace("at") public static native @Const @ByRef Tensor as_strided__symint(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride); +// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor bitwise_xor(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bitwise_xor_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
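For orientation, every bitwise_* family above follows the same overload pattern: Tensor, Tensor_Scalar, and Scalar_Tensor variants, each with matching _out (out-first) and _outf (out-last) forms. A sketch on boolean tensors; ones/zeros, Tensor.to(ScalarType), and the ScalarType enum are assumptions taken from elsewhere in these presets:

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class BitwiseSketch {
    public static void main(String[] args) {
        Tensor a = ones(4).to(ScalarType.Bool);   // bitwise ops require bool/integral dtypes
        Tensor b = zeros(4).to(ScalarType.Bool);
        Tensor and = bitwise_and(a, b);           // aten::bitwise_and.Tensor
        Tensor xor = bitwise_xor(a, b);           // aten::bitwise_xor.Tensor
        Tensor inv = bitwise_not(a);              // aten::bitwise_not
    }
}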
+@Namespace("at") public static native @ByRef Tensor bitwise_xor_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/as_strided_copy.h +// Parsed from ATen/ops/blackman_window.h // #pragma once @@ -36186,46 +21659,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); - - -// aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor as_strided_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride); - - -// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByRef Tensor as_strided_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); - - -// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor as_strided_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor as_strided_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); +// #include -// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor as_strided_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); -@Namespace("at") public static native @ByRef Tensor as_strided_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride); +// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length); +// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor as_strided_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal SymIntOptional storage_offset, @ByRef Tensor out); +// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor blackman_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length); +// aten::blackman_window.out(int window_length, *, Tensor(a!) 
out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor blackman_window_outf(@Cast("int64_t") long window_length, @ByRef Tensor out); +// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor blackman_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor blackman_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByRef Tensor out); -// Parsed from ATen/ops/as_strided_scatter.h +// Parsed from ATen/ops/block_diag.h // #pragma once @@ -36246,46 +21708,56 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// aten::block_diag(Tensor[] tensors) -> Tensor +@Namespace("at") public static native @ByVal Tensor block_diag(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor block_diag_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor block_diag_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); -// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor as_strided_scatter_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); -@Namespace("at") public static native @ByVal Tensor as_strided_scatter_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal SymIntRef size, @ByVal SymIntRef stride); -// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional storage_offset); -@Namespace("at") public static native @ByRef Tensor as_strided_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// Parsed from ATen/ops/bmm.h -// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor as_strided_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor as_strided_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal LongOptional storage_offset, @ByRef Tensor out); +// #pragma once +// @generated by torchgen/gen.py from Function.h -// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor as_strided_scatter_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional storage_offset); -@Namespace("at") public static native @ByRef Tensor as_strided_scatter_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal SymIntRef size, @ByVal SymIntRef stride); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor as_strided_scatter_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal SymIntOptional storage_offset, @ByRef Tensor out); +// #include + + +// aten::bmm(Tensor self, Tensor mat2) -> Tensor +@Namespace("at") public static native @ByVal Tensor bmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat2); + +// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat2); +// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat2, @ByRef Tensor out); -// Parsed from ATen/ops/asin.h +// Parsed from ATen/ops/broadcast_tensors.h // #pragma once @@ -36306,24 +21778,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::asin(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor asin(@Const @ByRef Tensor self); +// #include -// aten::asin_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor asin_(@ByRef Tensor self); -// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor asin_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor asin_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector broadcast_tensors(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// Parsed from ATen/ops/asinh.h +// Parsed from ATen/ops/broadcast_to.h // #pragma once @@ -36344,24 +21808,22 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::asinh(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor asinh(@Const @ByRef Tensor self); +// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor broadcast_to(@Const @ByRef Tensor self, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor broadcast_to(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::asinh_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor asinh_(@ByRef Tensor self); -// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor asinh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor asinh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor broadcast_to_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size); -// Parsed from ATen/ops/atan.h + +// Parsed from ATen/ops/bucketize.h // #pragma once @@ -36382,24 +21844,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::atan(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor atan(@Const @ByRef Tensor self); +// aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor bucketize(@Const @ByRef Tensor self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/); +@Namespace("at") public static native @ByVal Tensor bucketize(@Const @ByRef Tensor self, @Const @ByRef Tensor boundaries); -// aten::atan_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor atan_(@ByRef Tensor self); +// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bucketize_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/); +@Namespace("at") public static native @ByRef Tensor bucketize_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor boundaries); +// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bucketize_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByRef Tensor out); -// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor atan_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor atan_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor bucketize(@Const @ByRef Scalar self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/); +@Namespace("at") public static native @ByVal Tensor bucketize(@Const @ByRef Scalar self, @Const @ByRef Tensor boundaries); + +// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor bucketize_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/); +@Namespace("at") public static native @ByRef Tensor bucketize_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor boundaries); +// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor bucketize_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByRef Tensor out); -// Parsed from ATen/ops/atan2.h +// Parsed from ATen/ops/can_cast.h // #pragma once @@ -36420,21 +21891,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor atan2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor atan2_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::atan2(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor atan2(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::can_cast(ScalarType from, ScalarType to) -> bool +@Namespace("at") public static native @Cast("bool") boolean can_cast(ScalarType from, ScalarType to); -// Parsed from ATen/ops/atanh.h +// Parsed from ATen/ops/cartesian_prod.h // #pragma once @@ -36455,24 +21921,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::atanh(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor atanh(@Const @ByRef Tensor self); +// #include -// aten::atanh_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor atanh_(@ByRef Tensor self); -// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor atanh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor atanh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::cartesian_prod(Tensor[] tensors) -> Tensor +@Namespace("at") public static native @ByVal Tensor cartesian_prod(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// Parsed from ATen/ops/atleast_1d.h +// Parsed from ATen/ops/cat.h // #pragma once @@ -36493,19 +21951,31 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::atleast_1d(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor atleast_1d(@Const @ByRef Tensor self); +// aten::cat(Tensor[] tensors, int dim=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor cat(@Const @ByRef TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal Tensor cat(@Const @ByRef TensorArrayRef tensors); -// aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_1d(@ByVal TensorArrayRef tensors); +// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cat_out(@ByRef Tensor out, @Const @ByRef TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByRef Tensor cat_out(@ByRef Tensor out, @Const @ByRef TensorArrayRef tensors); +// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor cat_outf(@Const @ByRef TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); +// aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor +@Namespace("at") public static native @ByVal Tensor cat(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cat_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cat_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); -// Parsed from ATen/ops/atleast_2d.h + + +// Parsed from ATen/ops/cauchy.h // #pragma once @@ -36526,19 +21996,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::atleast_2d(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor atleast_2d(@Const @ByRef Tensor self); +// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cauchy_out(@ByRef Tensor out, @Const @ByRef Tensor self, double median/*=0*/, double sigma/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor cauchy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cauchy_outf(@Const @ByRef Tensor self, double median, double sigma, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_2d(@ByVal TensorArrayRef tensors); +// aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? 
generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor cauchy(@Const @ByRef Tensor self, double median/*=0*/, double sigma/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor cauchy(@Const @ByRef Tensor self); -// Parsed from ATen/ops/atleast_3d.h +// Parsed from ATen/ops/ccol_indices.h // #pragma once @@ -36559,19 +22033,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::atleast_3d(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor atleast_3d(@Const @ByRef Tensor self); -// aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector atleast_3d(@ByVal TensorArrayRef tensors); -// Parsed from ATen/ops/avg_pool1d.h +// Parsed from ATen/ops/ccol_indices_copy.h // #pragma once @@ -36592,19 +22061,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor -@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/); -@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/); -@Namespace("at") public static native @ByVal Tensor avg_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::ccol_indices_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor ccol_indices_copy(@Const @ByRef Tensor self); + +// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ccol_indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ccol_indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/avg_pool2d.h +// Parsed from ATen/ops/cdist.h // #pragma once @@ -36625,28 +22096,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); -// aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByVal Tensor avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor cdist(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2, double p/*=2*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional compute_mode); +@Namespace("at") public static native @ByVal Tensor cdist(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2); -// Parsed from ATen/ops/avg_pool2d_backward.h +// Parsed from ATen/ops/ceil.h // #pragma once @@ -36667,24 +22127,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -// aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor avg_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); +// aten::ceil(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor ceil(@Const @ByRef Tensor self); -// aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor -@Namespace("at") public static native @ByVal Tensor avg_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -@Namespace("at") public static native @ByVal Tensor avg_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); +// aten::ceil_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ceil_(@ByRef Tensor self); + +// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ceil_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ceil_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/avg_pool3d.h +// Parsed from ATen/ops/celu.h // #pragma once @@ -36705,28 +22165,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor out); +// aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor +@Namespace("at") public static native @ByVal Tensor celu(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1.0)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor celu(@Const @ByRef Tensor self); -// aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode/*=false*/, @Cast("bool") boolean count_include_pad/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional divisor_override); -@Namespace("at") public static native @ByVal Tensor avg_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor celu_(@ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1.0)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor celu_(@ByRef Tensor self); + +// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor celu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1.0)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor celu_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor celu_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// Parsed from ATen/ops/avg_pool3d_backward.h +// Parsed from ATen/ops/chain_matmul.h // #pragma once @@ -36747,24 +22206,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -// aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor avg_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override, @ByRef Tensor grad_input); +// aten::chain_matmul(Tensor[] matrices) -> Tensor +@Namespace("at") public static native @ByVal Tensor chain_matmul(@ByVal @Cast("at::TensorList*") TensorArrayRef matrices); -// aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor -@Namespace("at") public static native @ByVal Tensor avg_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); -@Namespace("at") public static native @ByVal Tensor avg_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @Cast("bool") boolean ceil_mode, @Cast("bool") boolean count_include_pad, @ByVal LongOptional divisor_override); +// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor chain_matmul_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef matrices); +// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor chain_matmul_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef matrices, @ByRef Tensor out); -// Parsed from ATen/ops/baddbmm.h +// Parsed from ATen/ops/chalf.h // #pragma once @@ -36785,23 +22241,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor baddbmm(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor baddbmm(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2); -// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor baddbmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor baddbmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2); -// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor baddbmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor batch1, @Const @ByRef Tensor batch2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// Parsed from ATen/ops/bartlett_window.h +// Parsed from ATen/ops/channel_shuffle.h // #pragma once @@ -36822,35 +22269,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length); -// aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// #include -// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -// aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor bartlett_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bartlett_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length); -// aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bartlett_window_outf(@Cast("int64_t") long window_length, @ByRef Tensor out); +// aten::channel_shuffle(Tensor self, int groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor channel_shuffle(@Const @ByRef Tensor self, @Cast("int64_t") long groups); -// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bartlett_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -// aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bartlett_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByRef Tensor out); +// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor channel_shuffle_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long groups); +// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor channel_shuffle_outf(@Const @ByRef Tensor self, @Cast("int64_t") long groups, @ByRef Tensor out); -// Parsed from ATen/ops/batch_norm.h +// Parsed from ATen/ops/cholesky.h // #pragma once @@ -36871,16 +22304,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor -@Namespace("at") public static native @ByVal Tensor batch_norm(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double momentum, double eps, @Cast("bool") boolean cudnn_enabled); +// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor cholesky_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); +@Namespace("at") public static native @ByRef Tensor cholesky_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cholesky_outf(@Const @ByRef Tensor self, @Cast("bool") boolean upper, @ByRef Tensor out); +// aten::cholesky(Tensor self, bool upper=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor cholesky(@Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); +@Namespace("at") public static native @ByVal Tensor cholesky(@Const @ByRef Tensor self); -// Parsed from ATen/ops/batch_norm_backward_elemt.h + +// Parsed from ATen/ops/cholesky_inverse.h // #pragma once @@ -36901,21 +22341,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor -@Namespace("at") public static native @ByVal Tensor batch_norm_backward_elemt(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor mean_dy, @Const @ByRef Tensor mean_dy_xmu, @Const @ByRef Tensor count); +// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor cholesky_inverse(@Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); +@Namespace("at") public static native @ByVal Tensor cholesky_inverse(@Const @ByRef Tensor self); -// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor batch_norm_backward_elemt_out(@ByRef Tensor out, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor mean_dy, @Const @ByRef Tensor mean_dy_xmu, @Const @ByRef Tensor count); -// aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor batch_norm_backward_elemt_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Const @ByRef Tensor mean_dy, @Const @ByRef Tensor mean_dy_xmu, @Const @ByRef Tensor count, @ByRef Tensor out); +// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cholesky_inverse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); +@Namespace("at") public static native @ByRef Tensor cholesky_inverse_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor cholesky_inverse_outf(@Const @ByRef Tensor self, @Cast("bool") boolean upper, @ByRef Tensor out); -// Parsed from ATen/ops/batch_norm_backward_reduce.h +// Parsed from ATen/ops/cholesky_solve.h // #pragma once @@ -36936,21 +22378,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple batch_norm_backward_reduce(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Cast("bool") boolean input_g, @Cast("bool") boolean weight_g, @Cast("bool") boolean bias_g); +// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cholesky_solve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Cast("bool") boolean upper/*=false*/); +@Namespace("at") public static native @ByRef Tensor cholesky_solve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor input2); +// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cholesky_solve_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Cast("bool") boolean upper, @ByRef Tensor out); -// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer batch_norm_backward_reduce_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Cast("bool") boolean input_g, @Cast("bool") boolean weight_g, @Cast("bool") boolean bias_g); -// aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer batch_norm_backward_reduce_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional weight, @Cast("bool") boolean input_g, @Cast("bool") boolean weight_g, @Cast("bool") boolean bias_g, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3); +// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor cholesky_solve(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Cast("bool") boolean upper/*=false*/); +@Namespace("at") public static native @ByVal Tensor cholesky_solve(@Const @ByRef Tensor self, @Const @ByRef Tensor input2); -// Parsed from ATen/ops/batch_norm_elemt.h +// Parsed from ATen/ops/choose_qparams_optimized.h // #pragma once @@ -36971,21 +22415,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor -@Namespace("at") public static native @ByVal Tensor batch_norm_elemt(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, double eps); -// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor batch_norm_elemt_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, double eps); -// aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor batch_norm_elemt_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, double eps, @ByRef Tensor out); +// aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T choose_qparams_optimized(@Const @ByRef Tensor input, @Cast("int64_t") long numel, @Cast("int64_t") long n_bins, double ratio, @Cast("int64_t") long bit_width); -// Parsed from ATen/ops/batch_norm_gather_stats.h +// Parsed from ATen/ops/chunk.h // #pragma once @@ -37006,21 +22445,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple batch_norm_gather_stats(@Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Cast("int64_t") long count); -// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer batch_norm_gather_stats_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Cast("int64_t") long count); -// aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer batch_norm_gather_stats_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Cast("int64_t") long count, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks); -// Parsed from ATen/ops/batch_norm_gather_stats_with_counts.h +// Parsed from ATen/ops/clamp.h // #pragma once @@ -37041,21 +22476,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple batch_norm_gather_stats_with_counts(@Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Const @ByRef Tensor counts); +// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor clamp(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); +@Namespace("at") public static native @ByVal Tensor clamp(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min); + +// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor clamp(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); +@Namespace("at") public static native @ByVal Tensor clamp(@Const @ByRef Tensor self); + +// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_(@ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); +@Namespace("at") public static native @ByRef Tensor clamp_(@ByRef Tensor self, @Const @ByRef ScalarOptional min); + +// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor clamp_(@ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); +@Namespace("at") public static native @ByRef Tensor clamp_(@ByRef Tensor self); + +// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); +@Namespace("at") public static native @ByRef Tensor clamp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional min); +// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef ScalarOptional max, @ByRef Tensor out); -// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer batch_norm_gather_stats_with_counts_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Const @ByRef Tensor counts); -// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer batch_norm_gather_stats_with_counts_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor invstd, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, double eps, @Const @ByRef Tensor counts, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); +@Namespace("at") public static native @ByRef Tensor clamp_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor clamp_outf(@Const @ByRef Tensor self, @Const @ByRef TensorOptional min, @Const @ByRef TensorOptional max, @ByRef Tensor out); -// Parsed from ATen/ops/batch_norm_stats.h +// Parsed from ATen/ops/clamp_max.h // #pragma once @@ -37076,21 +22531,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple batch_norm_stats(@Const @ByRef Tensor input, double eps); +// aten::clamp_max(Tensor self, Scalar max) -> Tensor +@Namespace("at") public static native @ByVal Tensor clamp_max(@Const @ByRef Tensor self, @Const @ByRef Scalar max); -// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer batch_norm_stats_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, double eps); -// aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer batch_norm_stats_outf(@Const @ByRef Tensor input, double eps, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor +@Namespace("at") public static native @ByVal Tensor clamp_max(@Const @ByRef Tensor self, @Const @ByRef Tensor max); + +// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_max_(@ByRef Tensor self, @Const @ByRef Scalar max); +// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_max_(@ByRef Tensor self, @Const @ByRef Tensor max); +// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_max_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar max); +// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_max_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar max, @ByRef Tensor out); +// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_max_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor max); +// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_max_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor max, @ByRef Tensor out); -// Parsed from ATen/ops/batch_norm_update_stats.h + + + +// Parsed from ATen/ops/clamp_min.h // #pragma once @@ -37111,21 +22580,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? 
running_var, float momentum) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple batch_norm_update_stats(@Const @ByRef Tensor input, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum); +// aten::clamp_min(Tensor self, Scalar min) -> Tensor +@Namespace("at") public static native @ByVal Tensor clamp_min(@Const @ByRef Tensor self, @Const @ByRef Scalar min); -// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer batch_norm_update_stats_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum); -// aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer batch_norm_update_stats_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, double momentum, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor +@Namespace("at") public static native @ByVal Tensor clamp_min(@Const @ByRef Tensor self, @Const @ByRef Tensor min); + +// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_min_(@ByRef Tensor self, @Const @ByRef Scalar min); +// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_min_(@ByRef Tensor self, @Const @ByRef Tensor min); + +// aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_min_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar min); +// aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_min_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar min, @ByRef Tensor out); + +// aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_min_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor min); +// aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clamp_min_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor min, @ByRef Tensor out); -// Parsed from ATen/ops/bernoulli.h + +// Parsed from ATen/ops/clip.h // #pragma once @@ -37146,40 +22629,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self); +// aten::clip(Tensor self, Scalar? min=None, Scalar? 
max=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor clip(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); +@Namespace("at") public static native @ByVal Tensor clip(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min); -// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bernoulli_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -// aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bernoulli_outf(@Const @ByRef Tensor self, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor clip(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); +@Namespace("at") public static native @ByVal Tensor clip(@Const @ByRef Tensor self); -// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self, double p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self, double p); +// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clip_(@ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); +@Namespace("at") public static native @ByRef Tensor clip_(@ByRef Tensor self, @Const @ByRef ScalarOptional min); -// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bernoulli_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -// aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bernoulli_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor p, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clip_(@ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); +@Namespace("at") public static native @ByRef Tensor clip_(@ByRef Tensor self); -// aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self, @Const @ByRef Tensor p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor bernoulli(@Const @ByRef Tensor self, @Const @ByRef Tensor p); +// aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) 
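// A short usage sketch tying the clamp_max, clamp_min and clip bindings above together
// (clip is ATen's NumPy-style alias of clamp). The factory call is an assumption:
//
// import org.bytedeco.pytorch.*;
// import static org.bytedeco.pytorch.global.torch.*;
//
// Tensor x = randn(4);                         // assumed randn(long...) overload
// Tensor hi = clamp_max(x, new Scalar(0.5));   // upper bound only
// Tensor lo = clamp_min(x, new Scalar(-0.5));  // lower bound only
// Tensor both = clip(x, new ScalarOptional(new Scalar(-1.0)),
//                       new ScalarOptional(new Scalar(1.0)));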
+@Namespace("at") public static native @ByRef Tensor clip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); +@Namespace("at") public static native @ByRef Tensor clip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional min); +// aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clip_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef ScalarOptional max, @ByRef Tensor out); -// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bernoulli_out(@ByRef Tensor out, @Const @ByRef Tensor self, double p/*=0.5*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -// aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bernoulli_outf(@Const @ByRef Tensor self, double p, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); +@Namespace("at") public static native @ByRef Tensor clip_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clip_outf(@Const @ByRef Tensor self, @Const @ByRef TensorOptional min, @Const @ByRef TensorOptional max, @ByRef Tensor out); -// Parsed from ATen/ops/bilinear.h +// Parsed from ATen/ops/clone.h // #pragma once @@ -37200,17 +22684,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor bilinear(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias); -@Namespace("at") public static native @ByVal Tensor bilinear(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor weight); +// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor clone(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor clone(@Const @ByRef Tensor self); + +// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor clone_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor clone_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor clone_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// Parsed from ATen/ops/binary_cross_entropy.h +// Parsed from ATen/ops/coalesce.h // #pragma once @@ -37231,23 +22721,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor binary_cross_entropy(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor binary_cross_entropy(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByRef Tensor out); -// Parsed from ATen/ops/binary_cross_entropy_backward.h +// Parsed from ATen/ops/col2im.h // #pragma once @@ -37268,23 +22749,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor binary_cross_entropy_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor binary_cross_entropy_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor col2im_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByRef Tensor col2im_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); -// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByRef Tensor grad_input); + +// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor col2im_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor col2im_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); + + +// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor col2im_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByRef Tensor col2im_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); + + +// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
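// Usage sketch for the col2im out-variants above, also showing the long[] convenience
// overloads that mirror each LongArrayRef parameter. Shapes follow the op's contract:
// a (N, C*kH*kW, L) column tensor folds back into an (N, C, H, W) image. Factories are
// assumptions:
//
// Tensor cols = randn(1, 4, 9);          // C=1, 2x2 kernel -> 4 rows; 3x3 positions -> L=9
// Tensor img = empty(1, 1, 4, 4);
// col2im_outf(cols, new long[]{4, 4},    // output_size
//                   new long[]{2, 2},    // kernel_size
//                   new long[]{1, 1},    // dilation
//                   new long[]{0, 0},    // padding
//                   new long[]{1, 1},    // stride
//                   img);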
+@Namespace("at") public static native @ByRef Tensor col2im_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor col2im_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); + + +// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor +@Namespace("at") public static native @ByVal Tensor col2im(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByVal Tensor col2im(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); + + +// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor +@Namespace("at") public static native @ByVal Tensor col2im_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByVal Tensor col2im_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -// Parsed from ATen/ops/binary_cross_entropy_with_logits.h + +// Parsed from ATen/ops/col_indices.h // #pragma once @@ -37305,23 +22806,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor binary_cross_entropy_with_logits(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional pos_weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor binary_cross_entropy_with_logits(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_with_logits_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional pos_weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_with_logits_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor binary_cross_entropy_with_logits_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional pos_weight, @Cast("int64_t") long reduction, @ByRef Tensor out); -// Parsed from ATen/ops/bincount.h +// Parsed from ATen/ops/col_indices_copy.h // #pragma once @@ -37342,23 +22834,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor bincount(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weights, @Cast("int64_t") long minlength/*=0*/); -@Namespace("at") public static native @ByVal Tensor bincount(@Const @ByRef Tensor self); +// aten::col_indices_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor col_indices_copy(@Const @ByRef Tensor self); -// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bincount_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weights, @Cast("int64_t") long minlength/*=0*/); -@Namespace("at") public static native @ByRef Tensor bincount_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bincount_outf(@Const @ByRef Tensor self, @Const @ByRef TensorOptional weights, @Cast("int64_t") long minlength, @ByRef Tensor out); +// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor col_indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor col_indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/binomial.h +// Parsed from ATen/ops/column_stack.h // #pragma once @@ -37379,23 +22869,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::binomial(Tensor count, Tensor prob, Generator? 
generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor binomial(@Const @ByRef Tensor count, @Const @ByRef Tensor prob, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor binomial(@Const @ByRef Tensor count, @Const @ByRef Tensor prob); +// aten::column_stack(Tensor[] tensors) -> Tensor +@Namespace("at") public static native @ByVal Tensor column_stack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor binomial_out(@ByRef Tensor out, @Const @ByRef Tensor count, @Const @ByRef Tensor prob, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor binomial_out(@ByRef Tensor out, @Const @ByRef Tensor count, @Const @ByRef Tensor prob); -// aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor binomial_outf(@Const @ByRef Tensor count, @Const @ByRef Tensor prob, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor column_stack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor column_stack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); -// Parsed from ATen/ops/bitwise_and.h +// Parsed from ATen/ops/combinations.h // #pragma once @@ -37416,37 +22904,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_and_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_and_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); - -// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_and_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_and_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); - -// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_and(@Const @ByRef Tensor self, @Const @ByRef Scalar other); - -// aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_and(@Const @ByRef Scalar self, @Const @ByRef Tensor other); +// #include -// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_and(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor bitwise_and_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_and_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor combinations(@Const @ByRef Tensor self, @Cast("int64_t") long r/*=2*/, @Cast("bool") boolean with_replacement/*=false*/); +@Namespace("at") public static native @ByVal Tensor combinations(@Const @ByRef Tensor self); -// Parsed from ATen/ops/bitwise_left_shift.h +// Parsed from ATen/ops/complex.h // #pragma once @@ -37467,37 +22935,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_left_shift(@Const @ByRef Tensor self, @Const @ByRef Tensor other); - -// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// #include -// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_left_shift(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// aten::complex(Tensor real, Tensor imag) -> Tensor -// aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_left_shift(@Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_left_shift_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor complex_out(@ByRef Tensor out, @Const @ByRef Tensor real, @Const @ByRef Tensor imag); +// aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor complex_outf(@Const @ByRef Tensor real, @Const @ByRef Tensor imag, @ByRef Tensor out); -// Parsed from ATen/ops/bitwise_not.h +// Parsed from ATen/ops/concat.h // #pragma once @@ -37518,21 +22970,31 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::bitwise_not(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_not(@Const @ByRef Tensor self); +// aten::concat(Tensor[] tensors, int dim=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor concat(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal Tensor concat(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_not_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_not_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor concat_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); + +// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor +@Namespace("at") public static native @ByVal Tensor concat(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor concat_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); -// Parsed from ATen/ops/bitwise_or.h + +// Parsed from ATen/ops/concatenate.h // #pragma once @@ -37553,37 +23015,31 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_or_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_or_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// #include -// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_or_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor bitwise_or_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_or(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_or(@Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor concatenate_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); -// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_or(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor +@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); -// aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_or_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_or_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim); +// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor concatenate_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); -// Parsed from ATen/ops/bitwise_right_shift.h +// Parsed from ATen/ops/conj.h // #pragma once @@ -37604,37 +23060,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_right_shift(@Const @ByRef Tensor self, @Const @ByRef Tensor other); - -// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); - -// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_right_shift(@Const @ByRef Tensor self, @Const @ByRef Scalar other); - -// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// #include -// aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_right_shift(@Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_right_shift_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::conj(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor __dispatch_conj(@Const @ByRef Tensor self); -// Parsed from ATen/ops/bitwise_xor.h +// Parsed from ATen/ops/conj_physical.h // #pragma once @@ -37655,37 +23090,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_xor_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_xor_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); - -// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_xor_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
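// Note on the conj binding above: the presets expose at::conj under the generated name
// __dispatch_conj, which appears to mirror the symbol ATen emits for ops that also exist
// as Tensor methods. A minimal sketch, with the factory call assumed:
//
// Tensor z = randn(2, 2);
// Tensor zc = __dispatch_conj(z);   // for real-valued tensors this is effectively a no-op view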
-@Namespace("at") public static native @ByRef Tensor bitwise_xor_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// #include -// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_xor(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_xor(@Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::conj_physical(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor conj_physical(@Const @ByRef Tensor self); -// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor bitwise_xor(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor conj_physical_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor conj_physical_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_xor_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bitwise_xor_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor conj_physical_(@ByRef Tensor self); -// Parsed from ATen/ops/blackman_window.h +// Parsed from ATen/ops/constant_pad_nd.h // #pragma once @@ -37706,35 +23128,46 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length); -// aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal LongArrayRef pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); +@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal LongArrayRef pad); +@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); +@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad); -// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -// aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor blackman_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length); -// aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor blackman_window_outf(@Cast("int64_t") long window_length, @ByRef Tensor out); +// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor constant_pad_nd_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); +@Namespace("at") public static native @ByVal Tensor constant_pad_nd_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef pad); -// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor blackman_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -// aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor blackman_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByRef Tensor out); +// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) 
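// Usage sketch for the constant_pad_nd bindings above. The pad array lists (before, after)
// pairs starting from the last dimension, as in torch.nn.functional.pad. The factory call
// is an assumption:
//
// Tensor x = randn(2, 2);
// Tensor padded = constant_pad_nd(x, new long[]{1, 1});                  // 2x2 -> 2x4, zero fill
// Tensor nines  = constant_pad_nd(x, new long[]{1, 1}, new Scalar(9.0)); // explicit fill value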
+@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef pad); +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad); +// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef pad, @Const @ByRef Scalar value, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Const @ByRef Scalar value, @ByRef Tensor out); -// Parsed from ATen/ops/block_diag.h + +// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef pad); + + +// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor constant_pad_nd_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef pad, @Const @ByRef Scalar value, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/contiguous.h // #pragma once @@ -37755,21 +23188,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::block_diag(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor block_diag(@ByVal TensorArrayRef tensors); -// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor block_diag_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor block_diag_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); -// Parsed from ATen/ops/bmm.h +// Parsed from ATen/ops/conv1d.h // #pragma once @@ -37790,21 +23216,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::bmm(Tensor self, Tensor mat2) -> Tensor -@Namespace("at") public static native @ByVal Tensor bmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat2); +// aten::conv1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat2); -// aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor bmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat2, @ByRef Tensor out); +// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding); -// Parsed from ATen/ops/broadcast_tensors.h +// Parsed from ATen/ops/conv2d.h // #pragma once @@ -37825,16 +23254,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector broadcast_tensors(@ByVal TensorArrayRef tensors); +// aten::conv2d(Tensor input, Tensor weight, Tensor? 
-// Parsed from ATen/ops/broadcast_tensors.h
+// Parsed from ATen/ops/conv2d.h

// #pragma once

@@ -37825,16 +23254,24 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/broadcast_tensors_ops.h>
+// #include <ATen/ops/conv2d_ops.h>

-// aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
-@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector broadcast_tensors(@ByVal TensorArrayRef tensors);
+// aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor
+@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/);
+@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight);
+@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/);
+
+// aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor
+@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/);
+@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding);
+@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/);
+@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding);

-// Parsed from ATen/ops/broadcast_to.h
+// Parsed from ATen/ops/conv3d.h

// #pragma once

@@ -37855,22 +23292,24 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/broadcast_to_ops.h>
-
-
-// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
-@Namespace("at") public static native @ByVal Tensor broadcast_to(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef size);
-@Namespace("at") public static native @ByVal Tensor broadcast_to(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... size);
+// #include <ATen/ops/conv3d_ops.h>

-// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
-@Namespace("at") public static native @ByVal Tensor broadcast_to_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size);
+// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor
+@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/);
+@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight);
+@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/);
+// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor
+@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/);
+@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding);
+@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/);
+@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding);

-// Parsed from ATen/ops/bucketize.h
+// Parsed from ATen/ops/conv_depthwise3d.h

// #pragma once

@@ -37891,33 +23330,43 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/bucketize_ops.h>
+// #include <ATen/ops/conv_depthwise3d_ops.h>

-// aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
-@Namespace("at") public static native @ByVal Tensor bucketize(@Const @ByRef Tensor self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/);
-@Namespace("at") public static native @ByVal Tensor bucketize(@Const @ByRef Tensor self, @Const @ByRef Tensor boundaries);
+// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
+@Namespace("at") public static native @ByVal Tensor conv_depthwise3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation);
+@Namespace("at") public static native @ByVal Tensor conv_depthwise3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);

-// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor bucketize_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/);
-@Namespace("at") public static native @ByRef Tensor bucketize_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor boundaries);
-// aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor bucketize_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByRef Tensor out);
-// aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
-@Namespace("at") public static native @ByVal Tensor bucketize(@Const @ByRef Scalar self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/);
-@Namespace("at") public static native @ByVal Tensor bucketize(@Const @ByRef Scalar self, @Const @ByRef Tensor boundaries);
+// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor
+@Namespace("at") public static native @ByVal Tensor conv_depthwise3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation);
+@Namespace("at") public static native @ByVal Tensor conv_depthwise3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);

-// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor bucketize_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/);
-@Namespace("at") public static native @ByRef Tensor bucketize_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor boundaries);
-// aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor bucketize_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor boundaries, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByRef Tensor out);
+
+// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation);
+@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
+
+
+// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @ByRef Tensor out);
+
+
+// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation);
+@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
+
+
+// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @ByRef Tensor out);

-// Parsed from ATen/ops/can_cast.h
+
+// Parsed from ATen/ops/conv_tbc.h

// #pragma once

@@ -37938,16 +23387,23 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/can_cast_ops.h>
+// #include <ATen/ops/conv_tbc_ops.h>

-// aten::can_cast(ScalarType from, ScalarType to) -> bool
-@Namespace("at") public static native @Cast("bool") boolean can_cast(ScalarType from, ScalarType to);
+// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor
+@Namespace("at") public static native @ByVal Tensor conv_tbc(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, @Cast("int64_t") long pad/*=0*/);
+@Namespace("at") public static native @ByVal Tensor conv_tbc(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias);
+
+// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor conv_tbc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, @Cast("int64_t") long pad/*=0*/);
+@Namespace("at") public static native @ByRef Tensor conv_tbc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias);
+// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor conv_tbc_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, @Cast("int64_t") long pad, @ByRef Tensor out);
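A convention worth noting in these bindings: every aten `.out` schema surfaces twice in Java, as foo_out(out, ...) with the C++ trailing defaults still usable, and as foo_outf(..., out) where every argument must be spelled out, matching at::foo_outf. A short sketch with conv_tbc, under the same assumptions as the conv1d sketch above (static import of org.bytedeco.pytorch.global.torch; zeros/empty with long... overloads; shapes purely illustrative):

Tensor self = randn(10, 2, 4);   // conv_tbc layout: (time, batch, in_channels)
Tensor weight = randn(3, 4, 6);  // (kernel_width, in_channels, out_channels)
Tensor bias = zeros(6);
Tensor out = empty(0);           // placeholder; the out-variant resizes it

conv_tbc_out(out, self, weight, bias);      // pad left at its default of 0
conv_tbc_outf(self, weight, bias, 1, out);  // every argument explicit, pad=1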
-// Parsed from ATen/ops/cartesian_prod.h
+
+// Parsed from ATen/ops/conv_tbc_backward.h

// #pragma once

@@ -37968,16 +23424,16 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/cartesian_prod_ops.h>
+// #include <ATen/ops/conv_tbc_backward_ops.h>

-// aten::cartesian_prod(Tensor[] tensors) -> Tensor
-@Namespace("at") public static native @ByVal Tensor cartesian_prod(@ByVal TensorArrayRef tensors);
+// aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T conv_tbc_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, @Cast("int64_t") long pad);

-// Parsed from ATen/ops/cat.h
+// Parsed from ATen/ops/conv_transpose1d.h

// #pragma once

@@ -37998,31 +23454,18 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/cat_ops.h>
-
-
-// aten::cat(Tensor[] tensors, int dim=0) -> Tensor
-@Namespace("at") public static native @ByVal Tensor cat(@Const @ByRef TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/);
-@Namespace("at") public static native @ByVal Tensor cat(@Const @ByRef TensorArrayRef tensors);
-
-// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor cat_out(@ByRef Tensor out, @Const @ByRef TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/);
-@Namespace("at") public static native @ByRef Tensor cat_out(@ByRef Tensor out, @Const @ByRef TensorArrayRef tensors);
-// aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor cat_outf(@Const @ByRef TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out);
+// #include <ATen/ops/conv_transpose1d_ops.h>

-// aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor
-@Namespace("at") public static native @ByVal Tensor cat(@ByVal TensorArrayRef tensors, @ByVal Dimname dim);
-// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor cat_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @ByVal Dimname dim);
-// aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor cat_outf(@ByVal TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out);
+// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor
+@Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation);
+@Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight);
+@Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);

-// Parsed from ATen/ops/cauchy.h
+// Parsed from ATen/ops/conv_transpose2d.h

// #pragma once

@@ -38043,23 +23486,18 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/cauchy_ops.h>
-
+// #include <ATen/ops/conv_transpose2d_ops.h>

-// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor cauchy_out(@ByRef Tensor out, @Const @ByRef Tensor self, double median/*=0*/, double sigma/*=1*/, @ByVal(nullValue = "c10::optional<at::Generator>(c10::nullopt)") GeneratorOptional generator);
-@Namespace("at") public static native @ByRef Tensor cauchy_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor cauchy_outf(@Const @ByRef Tensor self, double median, double sigma, @ByVal GeneratorOptional generator, @ByRef Tensor out);
-// aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor cauchy(@Const @ByRef Tensor self, double median/*=0*/, double sigma/*=1*/, @ByVal(nullValue = "c10::optional<at::Generator>(c10::nullopt)") GeneratorOptional generator);
-@Namespace("at") public static native @ByVal Tensor cauchy(@Const @ByRef Tensor self);
+// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor
+@Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation);
+@Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight);
+@Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);

-// Parsed from ATen/ops/ccol_indices.h
+// Parsed from ATen/ops/conv_transpose3d.h

// #pragma once

@@ -38080,14 +23518,18 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/ccol_indices_ops.h>
+// #include <ATen/ops/conv_transpose3d_ops.h>

+// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor
+@Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation);
+@Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight);
+@Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
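The transposed variants mirror conv1d/2d/3d but add output_padding and, unlike the forward convolutions, take groups before dilation. A sketch for conv_transpose2d, same assumptions as the earlier sketches (illustrative shapes only):

Tensor input = randn(1, 8, 5, 5);   // (batch, in_channels, H, W)
Tensor weight = randn(8, 4, 3, 3);  // (in_channels, out_channels/groups, kH, kW)

// Defaults: stride=1, padding=0, output_padding=0, groups=1, dilation=1.
Tensor up = conv_transpose2d(input, weight);
System.out.println(up.size(2));     // (5-1)*1 - 0 + (3-1) + 0 + 1 = 7

// Stride-2 upsampling via the long[] overload; note groups precedes dilation.
Tensor up2 = conv_transpose2d(input, weight, new TensorOptional(),
        new long[]{2, 2},   // stride
        new long[]{1, 1},   // padding
        new long[]{1, 1},   // output_padding
        1,                  // groups
        new long[]{1, 1});  // dilation
System.out.println(up2.size(2));    // (5-1)*2 - 2*1 + (3-1) + 1 + 1 = 10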
-// Parsed from ATen/ops/ccol_indices_copy.h
+// Parsed from ATen/ops/convolution.h

// #pragma once

@@ -38108,21 +23550,43 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/ccol_indices_copy_ops.h>
+// #include <ATen/ops/convolution_ops.h>

-// aten::ccol_indices_copy(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor ccol_indices_copy(@Const @ByRef Tensor self);
+// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
+@Namespace("at") public static native @ByVal Tensor convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups);
+@Namespace("at") public static native @ByVal Tensor convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups);

-// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor ccol_indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor ccol_indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
+
+// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor
+@Namespace("at") public static native @ByVal Tensor convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups);
+@Namespace("at") public static native @ByVal Tensor convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups);
+// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups);
+@Namespace("at") public static native @ByRef Tensor convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups);

-// Parsed from ATen/ops/cdist.h
+// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByRef Tensor out);
+
+
+// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups);
+@Namespace("at") public static native @ByRef Tensor convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups);
+
+
+// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out);
+
+
+
+
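aten::convolution is the generic entry point behind the conv*/conv_transpose* wrappers above: the transposed flag and output_padding select the transposed path, and the _symint variants take SymIntArrayRef where symbolic shapes are involved. A sketch expressing a plain conv2d through it, same assumptions as the earlier sketches:

Tensor input = randn(1, 3, 8, 8);
Tensor weight = randn(6, 3, 3, 3);

// Equivalent to conv2d(input, weight): transposed=false, output_padding=0.
Tensor y = convolution(input, weight, new TensorOptional(),
        new long[]{1, 1},  // stride
        new long[]{0, 0},  // padding
        new long[]{1, 1},  // dilation
        false,             // transposed
        new long[]{0, 0},  // output_padding
        1);                // groups
System.out.println(y.size(1));  // 6 output channels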
+// Parsed from ATen/ops/convolution_backward.h

// #pragma once

@@ -38143,17 +23607,43 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/cdist_ops.h>
+// #include <ATen/ops/convolution_backward_ops.h>

-// aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor cdist(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2, double p/*=2*/, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional compute_mode);
-@Namespace("at") public static native @ByVal Tensor cdist(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2);
+// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+
+
+// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);

-// Parsed from ATen/ops/ceil.h
+// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);
+
+
+// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+
+
+// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);
+
+
+
+
+// Parsed from ATen/ops/convolution_backward_overrideable.h

// #pragma once

@@ -38174,24 +23664,24 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/ceil_ops.h>
-
+// #include <ATen/ops/convolution_backward_overrideable_ops.h>

-// aten::ceil(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor ceil(@Const @ByRef Tensor self);
-// aten::ceil_(Tensor(a!) self) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor ceil_(@ByRef Tensor self);
+// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);

-// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor ceil_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor ceil_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
+// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T convolution_backward_overrideable_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);

-// Parsed from ATen/ops/celu.h
+// Parsed from ATen/ops/convolution_overrideable.h

// #pragma once

@@ -38212,27 +23702,24 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/celu_ops.h>
-
+// #include <ATen/ops/convolution_overrideable_ops.h>

-// aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor
-@Namespace("at") public static native @ByVal Tensor celu(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1.0)") Scalar alpha);
-@Namespace("at") public static native @ByVal Tensor celu(@Const @ByRef Tensor self);
-// aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor celu_(@ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1.0)") Scalar alpha);
-@Namespace("at") public static native @ByRef Tensor celu_(@ByRef Tensor self);
+// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
+@Namespace("at") public static native @ByVal Tensor convolution_overrideable(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups);
+@Namespace("at") public static native @ByVal Tensor convolution_overrideable(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups);

-// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor celu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1.0)") Scalar alpha);
-@Namespace("at") public static native @ByRef Tensor celu_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor celu_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar alpha, @ByRef Tensor out);
+// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor convolution_overrideable_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups);
+@Namespace("at") public static native @ByRef Tensor convolution_overrideable_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups);
+// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor convolution_overrideable_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal LongArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor convolution_overrideable_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByRef Tensor out);

-// Parsed from ATen/ops/chain_matmul.h
+// Parsed from ATen/ops/copy.h

// #pragma once

@@ -38253,21 +23740,23 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/chain_matmul_ops.h>
+// #include <ATen/ops/copy_ops.h>

-// aten::chain_matmul(Tensor[] matrices) -> Tensor
-@Namespace("at") public static native @ByVal Tensor chain_matmul(@ByVal TensorArrayRef matrices);
+// aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor copy(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking/*=false*/);
+@Namespace("at") public static native @ByVal Tensor copy(@Const @ByRef Tensor self, @Const @ByRef Tensor src);

-// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor chain_matmul_out(@ByRef Tensor out, @ByVal TensorArrayRef matrices);
-// aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor chain_matmul_outf(@ByVal TensorArrayRef matrices, @ByRef Tensor out);
+// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking/*=false*/);
+@Namespace("at") public static native @ByRef Tensor copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src);
+// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor copy_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking, @ByRef Tensor out);
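aten::copy is the functional counterpart of Tensor.copy_: it leaves self untouched and returns a new tensor of self's shape filled from src, with non_blocking defaulting to false. A short sketch, same assumptions as the earlier ones:

Tensor dst = zeros(2, 3);
Tensor src = randn(2, 3);

Tensor copied = copy(dst, src);  // dst itself is unchanged
Tensor out = empty(0);
copy_out(out, dst, src);         // out-variant, non_blocking left at false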
+@Namespace("at") public static native @ByRef Tensor copy_sparse_to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking/*=false*/); +@Namespace("at") public static native @ByRef Tensor copy_sparse_to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src); +// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor copy_sparse_to_sparse_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking, @ByRef Tensor out); + +// aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor copy_sparse_to_sparse(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking/*=false*/); +@Namespace("at") public static native @ByVal Tensor copy_sparse_to_sparse(@Const @ByRef Tensor self, @Const @ByRef Tensor src); -// Parsed from ATen/ops/channel_shuffle.h +// Parsed from ATen/ops/copysign.h // #pragma once @@ -38316,21 +23818,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::channel_shuffle(Tensor self, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor channel_shuffle(@Const @ByRef Tensor self, @Cast("int64_t") long groups); +// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor copysign_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor copysign_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor channel_shuffle_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long groups); -// aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor channel_shuffle_outf(@Const @ByRef Tensor self, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor copysign(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor copysign(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor copysign_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor copysign_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// Parsed from ATen/ops/cholesky.h + +// Parsed from ATen/ops/corrcoef.h // #pragma once @@ -38351,23 +23861,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor cholesky_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); -@Namespace("at") public static native @ByRef Tensor cholesky_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cholesky_outf(@Const @ByRef Tensor self, @Cast("bool") boolean upper, @ByRef Tensor out); -// aten::cholesky(Tensor self, bool upper=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor cholesky(@Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); -@Namespace("at") public static native @ByVal Tensor cholesky(@Const @ByRef Tensor self); +// aten::corrcoef(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor corrcoef(@Const @ByRef Tensor self); -// Parsed from ATen/ops/cholesky_inverse.h +// Parsed from ATen/ops/cos.h // #pragma once @@ -38388,23 +23891,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor cholesky_inverse(@Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); -@Namespace("at") public static native @ByVal Tensor cholesky_inverse(@Const @ByRef Tensor self); +// aten::cos(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor cos(@Const @ByRef Tensor self); -// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cholesky_inverse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); -@Namespace("at") public static native @ByRef Tensor cholesky_inverse_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cholesky_inverse_outf(@Const @ByRef Tensor self, @Cast("bool") boolean upper, @ByRef Tensor out); +// aten::cos_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cos_(@ByRef Tensor self); + +// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cos_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cos_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/cholesky_solve.h +// Parsed from ATen/ops/cosh.h // #pragma once @@ -38425,23 +23929,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cholesky_solve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Cast("bool") boolean upper/*=false*/); -@Namespace("at") public static native @ByRef Tensor cholesky_solve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor input2); -// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor cholesky_solve_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Cast("bool") boolean upper, @ByRef Tensor out); +// aten::cosh(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor cosh(@Const @ByRef Tensor self); -// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor cholesky_solve(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Cast("bool") boolean upper/*=false*/); -@Namespace("at") public static native @ByVal Tensor cholesky_solve(@Const @ByRef Tensor self, @Const @ByRef Tensor input2); +// aten::cosh_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cosh_(@ByRef Tensor self); + +// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cosh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cosh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/choose_qparams_optimized.h +// Parsed from ATen/ops/cosine_embedding_loss.h // #pragma once @@ -38462,16 +23967,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple choose_qparams_optimized(@Const @ByRef Tensor input, @Cast("int64_t") long numel, @Cast("int64_t") long n_bins, double ratio, @Cast("int64_t") long bit_width); +// aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor cosine_embedding_loss(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor target, double margin/*=0.0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor cosine_embedding_loss(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor target); -// Parsed from ATen/ops/chunk.h +// Parsed from ATen/ops/cosine_similarity.h // #pragma once @@ -38492,17 +23998,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks); +// aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor +@Namespace("at") public static native @ByVal Tensor cosine_similarity(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2, @Cast("int64_t") long dim/*=1*/, double eps/*=1e-08*/); +@Namespace("at") public static native @ByVal Tensor cosine_similarity(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2); -// Parsed from ATen/ops/clamp.h +// Parsed from ATen/ops/count_nonzero.h // #pragma once @@ -38523,41 +24029,34 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::clamp(Tensor self, Scalar? min=None, Scalar? 
max=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor clamp(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); -@Namespace("at") public static native @ByVal Tensor clamp(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min); +// #include -// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor clamp(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); -@Namespace("at") public static native @ByVal Tensor clamp(@Const @ByRef Tensor self); -// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_(@ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); -@Namespace("at") public static native @ByRef Tensor clamp_(@ByRef Tensor self, @Const @ByRef ScalarOptional min); +// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor +@Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_(@ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); -@Namespace("at") public static native @ByRef Tensor clamp_(@ByRef Tensor self); +// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); +@Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self); -// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); -@Namespace("at") public static native @ByRef Tensor clamp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional min); -// aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef ScalarOptional max, @ByRef Tensor out); +// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) 
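// Usage sketch (not generated code): calling the count_nonzero bindings from Java,
// assuming a static import of org.bytedeco.pytorch.global.torch.*; the tensor and
// variable names are illustrative only.
//
//   Tensor t = rand(3, 4);                 // hypothetical input tensor
//   Tensor total = count_nonzero(t);       // int? dim=None overload: counts over all dims
//   Tensor perDim = count_nonzero(t, 0L);  // dim_IntList overload via the long... varargs form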
+@Namespace("at") public static native @ByRef Tensor count_nonzero_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor count_nonzero_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByRef Tensor out); -// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); -@Namespace("at") public static native @ByRef Tensor clamp_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_outf(@Const @ByRef Tensor self, @Const @ByRef TensorOptional min, @Const @ByRef TensorOptional max, @ByRef Tensor out); +// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); +@Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor count_nonzero_outf(@Const @ByRef Tensor self, @ByVal LongOptional dim, @ByRef Tensor out); -// Parsed from ATen/ops/clamp_max.h +// Parsed from ATen/ops/cov.h // #pragma once @@ -38578,35 +24077,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::clamp_max(Tensor self, Scalar max) -> Tensor -@Namespace("at") public static native @ByVal Tensor clamp_max(@Const @ByRef Tensor self, @Const @ByRef Scalar max); - -// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor -@Namespace("at") public static native @ByVal Tensor clamp_max(@Const @ByRef Tensor self, @Const @ByRef Tensor max); - -// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_max_(@ByRef Tensor self, @Const @ByRef Scalar max); - -// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_max_(@ByRef Tensor self, @Const @ByRef Tensor max); +// #include -// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_max_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar max); -// aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_max_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar max, @ByRef Tensor out); -// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_max_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor max); -// aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor clamp_max_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor max, @ByRef Tensor out); +// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor cov(@Const @ByRef Tensor self, @Cast("int64_t") long correction/*=1*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional fweights, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional aweights); +@Namespace("at") public static native @ByVal Tensor cov(@Const @ByRef Tensor self); -// Parsed from ATen/ops/clamp_min.h +// Parsed from ATen/ops/cross.h // #pragma once @@ -38627,35 +24108,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::clamp_min(Tensor self, Scalar min) -> Tensor -@Namespace("at") public static native @ByVal Tensor clamp_min(@Const @ByRef Tensor self, @Const @ByRef Scalar min); - -// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor -@Namespace("at") public static native @ByVal Tensor clamp_min(@Const @ByRef Tensor self, @Const @ByRef Tensor min); - -// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_min_(@ByRef Tensor self, @Const @ByRef Scalar min); +// #include -// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_min_(@ByRef Tensor self, @Const @ByRef Tensor min); -// aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_min_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar min); -// aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_min_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar min, @ByRef Tensor out); +// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cross_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); +@Namespace("at") public static native @ByRef Tensor cross_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cross_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal LongOptional dim, @ByRef Tensor out); -// aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_min_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor min); -// aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clamp_min_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor min, @ByRef Tensor out); +// aten::cross(Tensor self, Tensor other, int? 
dim=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor cross(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); +@Namespace("at") public static native @ByVal Tensor cross(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/clip.h +// Parsed from ATen/ops/cross_entropy_loss.h // #pragma once @@ -38676,41 +24145,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor clip(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); -@Namespace("at") public static native @ByVal Tensor clip(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min); +// #include -// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor clip(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); -@Namespace("at") public static native @ByVal Tensor clip(@Const @ByRef Tensor self); -// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clip_(@ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); -@Namespace("at") public static native @ByRef Tensor clip_(@ByRef Tensor self, @Const @ByRef ScalarOptional min); +// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor +@Namespace("at") public static native @ByVal Tensor cross_entropy_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/, double label_smoothing/*=0.0*/); +@Namespace("at") public static native @ByVal Tensor cross_entropy_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clip_(@ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); -@Namespace("at") public static native @ByRef Tensor clip_(@ByRef Tensor self); -// aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional max); -@Namespace("at") public static native @ByRef Tensor clip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional min); -// aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clip_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional min, @Const @ByRef ScalarOptional max, @ByRef Tensor out); +// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor +@Namespace("at") public static native @ByVal Tensor cross_entropy_loss_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index, double label_smoothing/*=0.0*/); +@Namespace("at") public static native @ByVal Tensor cross_entropy_loss_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional min, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional max); -@Namespace("at") public static native @ByRef Tensor clip_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clip_outf(@Const @ByRef Tensor self, @Const @ByRef TensorOptional min, @Const @ByRef TensorOptional max, @ByRef Tensor out); -// Parsed from ATen/ops/clone.h +// Parsed from ATen/ops/crow_indices.h // #pragma once @@ -38731,23 +24182,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor clone(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor clone(@Const @ByRef Tensor self); -// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clone_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor clone_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor clone_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// Parsed from ATen/ops/coalesce.h +// Parsed from ATen/ops/crow_indices_copy.h // #pragma once @@ -38768,14 +24210,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::crow_indices_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor crow_indices_copy(@Const @ByRef Tensor self); +// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor crow_indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor crow_indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/col2im.h +// Parsed from ATen/ops/ctc_loss.h // #pragma once @@ -38796,43 +24245,58 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor col2im_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor col2im_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal LongArrayRef input_lengths, @ByVal LongArrayRef target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean zero_infinity/*=false*/); +@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal LongArrayRef input_lengths, @ByVal LongArrayRef target_lengths); +@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean zero_infinity/*=false*/); +@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... target_lengths); +// aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean zero_infinity/*=false*/); +@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths); -// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor col2im_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor col2im_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); -// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor col2im_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor col2im_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// Parsed from ATen/ops/cudnn_affine_grid_generator.h -// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor col2im_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor col2im_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +// #pragma once +// @generated by torchgen/gen.py from Function.h -// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor -@Namespace("at") public static native @ByVal Tensor col2im(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor col2im(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor -@Namespace("at") public static native @ByVal Tensor col2im_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor col2im_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// #include +// aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid +@Namespace("at") public static native @ByVal Tensor cudnn_affine_grid_generator(@Const @ByRef Tensor theta, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W); +// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor cudnn_affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W); +// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_affine_grid_generator_outf(@Const @ByRef Tensor theta, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W, @ByRef Tensor out); -// Parsed from ATen/ops/col_indices.h + + + +// Parsed from ATen/ops/cudnn_affine_grid_generator_backward.h // #pragma once @@ -38853,14 +24317,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta +@Namespace("at") public static native @ByVal Tensor cudnn_affine_grid_generator_backward(@Const @ByRef Tensor grad, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W); + +// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_affine_grid_generator_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W); +// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_affine_grid_generator_backward_outf(@Const @ByRef Tensor grad, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W, @ByRef Tensor out); -// Parsed from ATen/ops/col_indices_copy.h +// Parsed from ATen/ops/cudnn_batch_norm.h // #pragma once @@ -38881,21 +24352,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::col_indices_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor col_indices_copy(@Const @ByRef Tensor self); -// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor col_indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor col_indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T cudnn_batch_norm(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon); +// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T cudnn_batch_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon); +// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T cudnn_batch_norm_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3); -// Parsed from ATen/ops/column_stack.h + +// Parsed from ATen/ops/cudnn_batch_norm_backward.h // #pragma once @@ -38916,21 +24387,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::column_stack(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor column_stack(@ByVal TensorArrayRef tensors); +// aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T cudnn_batch_norm_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon, @Const @ByRef Tensor reserveSpace); -// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor column_stack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor column_stack_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); +// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T cudnn_batch_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon, @Const @ByRef Tensor reserveSpace); +// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T cudnn_batch_norm_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon, @Const @ByRef Tensor reserveSpace, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/combinations.h +// Parsed from ATen/ops/cudnn_convolution.h // #pragma once @@ -38951,17 +24422,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor combinations(@Const @ByRef Tensor self, @Cast("int64_t") long r/*=2*/, @Cast("bool") boolean with_replacement/*=false*/); -@Namespace("at") public static native @ByVal Tensor combinations(@Const @ByRef Tensor self); +// aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor +@Namespace("at") public static native @ByVal Tensor cudnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +@Namespace("at") public static native @ByVal Tensor cudnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); + +// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
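// Usage sketch (not generated code): the value-returning cudnn_convolution overload
// above, called with the plain long[] forms of its int[] schema arguments. Names are
// illustrative, and the call assumes CUDA tensors since it dispatches to cuDNN:
//
//   Tensor y = cudnn_convolution(input, weight,
//       new long[]{1, 1},        // padding
//       new long[]{1, 1},        // stride
//       new long[]{1, 1},        // dilation
//       1,                       // groups
//       false,                   // benchmark
//       true,                    // deterministic
//       true);                   // allow_tf32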
+@Namespace("at") public static native @ByRef Tensor cudnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); -// Parsed from ATen/ops/complex.h +// Parsed from ATen/ops/cudnn_convolution_add_relu.h // #pragma once @@ -38982,21 +24460,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::complex(Tensor real, Tensor imag) -> Tensor +// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); -// aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor complex_out(@ByRef Tensor out, @Const @ByRef Tensor real, @Const @ByRef Tensor imag); -// aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor complex_outf(@Const @ByRef Tensor real, @Const @ByRef Tensor imag, @ByRef Tensor out); +// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -// Parsed from ATen/ops/concat.h +// Parsed from ATen/ops/cudnn_convolution_relu.h // #pragma once @@ -39017,31 +24498,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::concat(Tensor[] tensors, int dim=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor concat(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal Tensor concat(@ByVal TensorArrayRef tensors); +// #include -// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concat_outf(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); -// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor concat(@ByVal TensorArrayRef tensors, @ByVal Dimname dim); +// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); -// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concat_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @ByVal Dimname dim); -// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concat_outf(@ByVal TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); +// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
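// Naming convention, visible throughout these bindings: xxx_out takes the destination
// tensor as its first argument, while xxx_outf takes it last, mirroring the
// at::xxx_out / at::xxx_outf pair in the C++ API. Sketch with illustrative names:
//
//   cudnn_convolution_relu_out(out, input, weight, bias,
//       new long[]{1, 1}, new long[]{0, 0}, new long[]{1, 1}, 1);      // out first
//   cudnn_convolution_relu_outf(input, weight, bias,
//       new long[]{1, 1}, new long[]{0, 0}, new long[]{1, 1}, 1, out); // out last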
+@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -// Parsed from ATen/ops/concatenate.h +// Parsed from ATen/ops/cudnn_convolution_transpose.h // #pragma once @@ -39062,31 +24536,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal TensorArrayRef tensors); +// #include -// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concatenate_outf(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); -// aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor concatenate(@ByVal TensorArrayRef tensors, @ByVal Dimname dim); +// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +@Namespace("at") public static native @ByVal Tensor cudnn_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor concatenate_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @ByVal Dimname dim); -// aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor concatenate_outf(@ByVal TensorArrayRef tensors, @ByVal Dimname dim, @ByRef Tensor out); +// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); -// Parsed from ATen/ops/conj.h +// Parsed from ATen/ops/cudnn_grid_sampler.h // #pragma once @@ -39107,16 +24574,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::conj(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor __dispatch_conj(@Const @ByRef Tensor self); +// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output +@Namespace("at") public static native @ByVal Tensor cudnn_grid_sampler(@Const @ByRef Tensor self, @Const @ByRef Tensor grid); +// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_grid_sampler_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor grid); +// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cudnn_grid_sampler_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grid, @ByRef Tensor out); -// Parsed from ATen/ops/conj_physical.h + +// Parsed from ATen/ops/cudnn_grid_sampler_backward.h // #pragma once @@ -39137,24 +24609,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::conj_physical(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor conj_physical(@Const @ByRef Tensor self); -// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor conj_physical_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor conj_physical_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid) +@Namespace("at") public static native @ByVal T_TensorTensor_T cudnn_grid_sampler_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grid, @Const @ByRef Tensor grad_output); -// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor conj_physical_(@ByRef Tensor self); +// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T cudnn_grid_sampler_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grid, @Const @ByRef Tensor grad_output); +// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T cudnn_grid_sampler_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grid, @Const @ByRef Tensor grad_output, @ByRef Tensor out0, @ByRef Tensor out1); -// Parsed from ATen/ops/constant_pad_nd.h +// Parsed from ATen/ops/cudnn_is_acceptable.h // #pragma once @@ -39175,46 +24644,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); -@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad); -@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); -@Namespace("at") public static native @ByVal Tensor constant_pad_nd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad); - - -// aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor constant_pad_nd_symint(@Const @ByRef Tensor self, @ByVal SymIntRef pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); -@Namespace("at") public static native @ByVal Tensor constant_pad_nd_symint(@Const @ByRef Tensor self, @ByVal SymIntRef pad); - - -// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad); -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad); - - -// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor constant_pad_nd_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad, @Const @ByRef Scalar value, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @Const @ByRef Scalar value, @ByRef Tensor out); - - -// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef pad, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar value); -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef pad); - +// #include -// aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor constant_pad_nd_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef pad, @Const @ByRef Scalar value, @ByRef Tensor out); +// aten::cudnn_is_acceptable(Tensor self) -> bool +@Namespace("at") public static native @Cast("bool") boolean cudnn_is_acceptable(@Const @ByRef Tensor self); -// Parsed from ATen/ops/contiguous.h +// Parsed from ATen/ops/cummax.h // #pragma once @@ -39235,14 +24674,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummax_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor values, @ByRef Tensor indices); + +// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummax(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummax_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummax_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByRef Tensor values, @ByRef Tensor indices); -// Parsed from ATen/ops/conv1d.h +// Parsed from ATen/ops/cummaxmin_backward.h // #pragma once @@ -39263,24 +24717,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::conv1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding); +// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor +@Namespace("at") public static native @ByVal Tensor cummaxmin_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Cast("int64_t") long dim); -// Parsed from ATen/ops/conv2d.h +// Parsed from ATen/ops/cummin.h // #pragma once @@ -39301,24 +24747,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::conv2d(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummin(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding); +// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummin_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummin_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor values, @ByRef Tensor indices); +// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummin(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummin_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T cummin_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByRef Tensor values, @ByRef Tensor indices); -// Parsed from ATen/ops/conv3d.h + + +// Parsed from ATen/ops/cumprod.h // #pragma once @@ -39339,24 +24790,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor cumprod(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor cumprod(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::string_view*") Pointer padding); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor conv3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast("c10::string_view*") Pointer padding); +// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cumprod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor cumprod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cumprod_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor cumprod(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor cumprod(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cumprod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor cumprod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor cumprod_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/conv_depthwise3d.h + + +// Parsed from ATen/ops/cumprod_backward.h // #pragma once @@ -39377,43 +24837,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv_depthwise3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor conv_depthwise3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); - - -// aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv_depthwise3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor conv_depthwise3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); - - -// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dilation); - - -// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); - - -// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); - +// #include -// aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor conv_depthwise3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +// aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor +@Namespace("at") public static native @ByVal Tensor cumprod_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input, @Cast("int64_t") long dim, @Const @ByRef Tensor output); -// Parsed from ATen/ops/conv_tbc.h +// Parsed from ATen/ops/cumsum.h // #pragma once @@ -39434,23 +24867,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv_tbc(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, @Cast("int64_t") long pad/*=0*/); -@Namespace("at") public static native @ByVal Tensor conv_tbc(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias); +// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor cumsum(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor cumsum(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor conv_tbc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, @Cast("int64_t") long pad/*=0*/); -@Namespace("at") public static native @ByRef Tensor conv_tbc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias); -// aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor conv_tbc_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, @Cast("int64_t") long pad, @ByRef Tensor out); +// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cumsum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor cumsum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor cumsum_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); + +// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor cumsum(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor cumsum(@Const @ByRef Tensor self, @ByVal Dimname dim); + +// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cumsum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor cumsum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor cumsum_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/conv_tbc_backward.h +// Parsed from ATen/ops/cumulative_trapezoid.h // #pragma once @@ -39471,16 +24914,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple conv_tbc_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef Tensor bias, @Cast("int64_t") long pad); +// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor cumulative_trapezoid(@Const @ByRef Tensor y, @Const @ByRef Tensor x, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByVal Tensor cumulative_trapezoid(@Const @ByRef Tensor y, @Const @ByRef Tensor x); + +// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor cumulative_trapezoid(@Const @ByRef Tensor y, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar dx, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByVal Tensor cumulative_trapezoid(@Const @ByRef Tensor y); -// Parsed from ATen/ops/conv_transpose1d.h +// Parsed from ATen/ops/data.h // #pragma once @@ -39501,18 +24949,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose1d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -// Parsed from ATen/ops/conv_transpose2d.h +// Parsed from ATen/ops/deg2rad.h // #pragma once @@ -39533,18 +24977,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose2d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +// aten::deg2rad(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor deg2rad(@Const @ByRef Tensor self); + +// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor deg2rad_(@ByRef Tensor self); +// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor deg2rad_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor deg2rad_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/conv_transpose3d.h + +// Parsed from ATen/ops/dense_dim.h // #pragma once @@ -39565,18 +25015,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor conv_transpose3d(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -// Parsed from ATen/ops/convolution.h +// Parsed from ATen/ops/dequantize.h // #pragma once @@ -39597,43 +25043,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::convolution(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor convolution(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups); - - -// aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor convolution_symint(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups); - - -// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor convolution_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups); - - -// aten::convolution.out(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor convolution_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); +// #include -// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups); +// aten::dequantize.self(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor dequantize(@Const @ByRef Tensor self); +// aten::dequantize.tensors(Tensor[] tensors) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dequantize(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor convolution_symint_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor dequantize_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor dequantize_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void dequantize_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void dequantize_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal @Cast("at::TensorList*") TensorArrayRef out); -// Parsed from ATen/ops/convolution_backward.h +// Parsed from ATen/ops/det.h // #pragma once @@ -39654,43 +25086,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? 
bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple convolution_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple convolution_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple convolution_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal LongArrayRefOptional bias_sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); - - -// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); - +// #include -// aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal SymIntArrayRefOptional bias_sizes, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal SymIntRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::det(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor det(@Const @ByRef Tensor self); -// Parsed from ATen/ops/convolution_backward_overrideable.h +// Parsed from ATen/ops/detach.h // #pragma once @@ -39711,24 +25116,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) -@Namespace("at") public static native @ByVal 
TensorTensorTensorTuple convolution_backward_overrideable(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple convolution_backward_overrideable(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::detach(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor detach(@Const @ByRef Tensor self); -// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_overrideable_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_overrideable_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_overrideable_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer convolution_backward_overrideable_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::detach_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor detach_(@ByRef Tensor self); -// Parsed from ATen/ops/convolution_overrideable.h +// Parsed from ATen/ops/detach_copy.h // #pragma once @@ -39749,24 +25149,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor convolution_overrideable(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor convolution_overrideable(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups); +// aten::detach_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor detach_copy(@Const @ByRef Tensor self); -// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor convolution_overrideable_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor convolution_overrideable_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups); -// aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor convolution_overrideable_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean transposed, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor convolution_overrideable_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean transposed, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor detach_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor detach_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/copy.h +// Parsed from ATen/ops/diag.h // #pragma once @@ -39787,23 +25184,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor copy(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor copy(@Const @ByRef Tensor self, @Const @ByRef Tensor src); +// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor diag_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/); +@Namespace("at") public static native @ByRef Tensor diag_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diag_outf(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal, @ByRef Tensor out); -// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByRef Tensor copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src); -// aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor copy_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking, @ByRef Tensor out); +// aten::diag(Tensor self, int diagonal=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor diag(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/); +@Namespace("at") public static native @ByVal Tensor diag(@Const @ByRef Tensor self); -// Parsed from ATen/ops/copy_sparse_to_sparse.h +// Parsed from ATen/ops/diag_embed.h // #pragma once @@ -39824,27 +25221,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor copy_sparse_to_sparse_(@ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByRef Tensor copy_sparse_to_sparse_(@ByRef Tensor self, @Const @ByRef Tensor src); -// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor copy_sparse_to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByRef Tensor copy_sparse_to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src); -// aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor copy_sparse_to_sparse_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking, @ByRef Tensor out); +// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor diag_embed(@Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=-2*/, @Cast("int64_t") long dim2/*=-1*/); +@Namespace("at") public static native @ByVal Tensor diag_embed(@Const @ByRef Tensor self); -// aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor copy_sparse_to_sparse(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("bool") boolean non_blocking/*=false*/); -@Namespace("at") public static native @ByVal Tensor copy_sparse_to_sparse(@Const @ByRef Tensor self, @Const @ByRef Tensor src); +// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diag_embed_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=-2*/, @Cast("int64_t") long dim2/*=-1*/); +@Namespace("at") public static native @ByRef Tensor diag_embed_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diag_embed_outf(@Const @ByRef Tensor self, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); -// Parsed from ATen/ops/copysign.h +// Parsed from ATen/ops/diagflat.h // #pragma once @@ -39865,29 +25258,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor copysign_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor copysign_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); - -// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor copysign(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// #include -// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor copysign(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor copysign_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor copysign_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// aten::diagflat(Tensor self, int offset=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor diagflat(@Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/); +@Namespace("at") public static native @ByVal Tensor diagflat(@Const @ByRef Tensor self); -// Parsed from ATen/ops/corrcoef.h +// Parsed from ATen/ops/diagonal.h // #pragma once @@ -39908,16 +25289,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::corrcoef(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor corrcoef(@Const @ByRef Tensor self); +// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor diagonal(@Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/); +@Namespace("at") public static native @ByVal Tensor diagonal(@Const @ByRef Tensor self); + +// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor diagonal(@Const @ByRef Tensor self, @ByVal Dimname outdim, @ByVal Dimname dim1, @ByVal Dimname dim2, @Cast("int64_t") long offset/*=0*/); +@Namespace("at") public static native @ByVal Tensor diagonal(@Const @ByRef Tensor self, @ByVal Dimname outdim, @ByVal Dimname dim1, @ByVal Dimname dim2); -// Parsed from ATen/ops/cos.h +// Parsed from ATen/ops/diagonal_backward.h // #pragma once @@ -39938,24 +25324,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cos(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor cos(@Const @ByRef Tensor self); +// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor +@Namespace("at") public static native @ByVal Tensor diagonal_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); +@Namespace("at") public static native @ByVal Tensor diagonal_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); -// aten::cos_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cos_(@ByRef Tensor self); -// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cos_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cos_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor +@Namespace("at") public static native @ByVal Tensor diagonal_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); +// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor diagonal_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); +@Namespace("at") public static native @ByRef Tensor diagonal_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); + + +// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diagonal_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor diagonal_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); + + +// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diagonal_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); + + +// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diagonal_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); + -// Parsed from ATen/ops/cosh.h + + +// Parsed from ATen/ops/diagonal_copy.h // #pragma once @@ -39976,24 +25378,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::cosh(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor cosh(@Const @ByRef Tensor self); +// #include -// aten::cosh_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cosh_(@ByRef Tensor self); -// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cosh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cosh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor diagonal_copy(@Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/); +@Namespace("at") public static native @ByVal Tensor diagonal_copy(@Const @ByRef Tensor self); +// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor diagonal_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/); +@Namespace("at") public static native @ByRef Tensor diagonal_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diagonal_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); -// Parsed from ATen/ops/cosine_embedding_loss.h + +// Parsed from ATen/ops/diagonal_scatter.h // #pragma once @@ -40014,17 +25415,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor cosine_embedding_loss(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor target, double margin/*=0.0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor cosine_embedding_loss(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor target); +// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor diagonal_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/); +@Namespace("at") public static native @ByVal Tensor diagonal_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src); +// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diagonal_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/); +@Namespace("at") public static native @ByRef Tensor diagonal_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src); +// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diagonal_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); -// Parsed from ATen/ops/cosine_similarity.h + +// Parsed from ATen/ops/diff.h // #pragma once @@ -40045,17 +25452,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor -@Namespace("at") public static native @ByVal Tensor cosine_similarity(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2, @Cast("int64_t") long dim/*=1*/, double eps/*=1e-08*/); -@Namespace("at") public static native @ByVal Tensor cosine_similarity(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2); +// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? 
append=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor diff(@Const @ByRef Tensor self, @Cast("int64_t") long n/*=1*/, @Cast("int64_t") long dim/*=-1*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional prepend, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional append); +@Namespace("at") public static native @ByVal Tensor diff(@Const @ByRef Tensor self); +// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diff_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long n/*=1*/, @Cast("int64_t") long dim/*=-1*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional prepend, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional append); +@Namespace("at") public static native @ByRef Tensor diff_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor diff_outf(@Const @ByRef Tensor self, @Cast("int64_t") long n, @Cast("int64_t") long dim, @Const @ByRef TensorOptional prepend, @Const @ByRef TensorOptional append, @ByRef Tensor out); -// Parsed from ATen/ops/count_nonzero.h + +// Parsed from ATen/ops/digamma.h // #pragma once @@ -40076,34 +25489,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// #include -// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); -@Namespace("at") public static native @ByVal Tensor count_nonzero(@Const @ByRef Tensor self); -// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor count_nonzero_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor count_nonzero_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByRef Tensor out); +// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor digamma_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor digamma_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::count_nonzero.out(Tensor self, int? 
dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); -@Namespace("at") public static native @ByRef Tensor count_nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor count_nonzero_outf(@Const @ByRef Tensor self, @ByVal LongOptional dim, @ByRef Tensor out); +// aten::digamma(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor digamma(@Const @ByRef Tensor self); -// Parsed from ATen/ops/cov.h +// Parsed from ATen/ops/dist.h // #pragma once @@ -40124,17 +25524,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor cov(@Const @ByRef Tensor self, @Cast("int64_t") long correction/*=1*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional fweights, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional aweights); -@Namespace("at") public static native @ByVal Tensor cov(@Const @ByRef Tensor self); +// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor +@Namespace("at") public static native @ByVal Tensor dist(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); +@Namespace("at") public static native @ByVal Tensor dist(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor dist_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); +@Namespace("at") public static native @ByRef Tensor dist_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor dist_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar p, @ByRef Tensor out); -// Parsed from ATen/ops/cross.h +// Parsed from ATen/ops/div.h // #pragma once @@ -40155,23 +25561,45 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cross_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); -@Namespace("at") public static native @ByRef Tensor cross_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cross_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal LongOptional dim, @ByRef Tensor out); +// aten::div.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::cross(Tensor self, Tensor other, int? 
dim=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor cross(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); -@Namespace("at") public static native @ByVal Tensor cross(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor +@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); + +// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode, @ByRef Tensor out); + +// aten::div.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor +@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); + +// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); + +// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode, @ByRef Tensor out); -// Parsed from ATen/ops/cross_entropy_loss.h + +// Parsed from ATen/ops/divide.h // #pragma once @@ -40192,23 +25620,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor -@Namespace("at") public static native @ByVal Tensor cross_entropy_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/, double label_smoothing/*=0.0*/); -@Namespace("at") public static native @ByVal Tensor cross_entropy_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor divide_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor divide_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor -@Namespace("at") public static native @ByVal Tensor cross_entropy_loss_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index, double label_smoothing/*=0.0*/); -@Namespace("at") public static native @ByVal Tensor cross_entropy_loss_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor +@Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); + +// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor divide_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor divide_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode, @ByRef Tensor out); +// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor +@Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); -// Parsed from ATen/ops/crow_indices.h +// Parsed from ATen/ops/dot.h // #pragma once @@ -40229,14 +25669,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::dot(Tensor self, Tensor tensor) -> Tensor +@Namespace("at") public static native @ByVal Tensor dot(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor); + +// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) 
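// [Editor's illustration — not part of the patch.] Possible Java usage of the
// dot bindings declared below (the torch.* factory calls are assumptions):
//
//   Tensor a = torch.ones(3);
//   Tensor s = torch.dot(a, a);           // 0-dim tensor holding 3.0
//   Tensor out = torch.empty(0);
//   torch.dot_out(out, a, a);             // same value, written into `out`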
+@Namespace("at") public static native @ByRef Tensor dot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor tensor); +// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor dot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor, @ByRef Tensor out); -// Parsed from ATen/ops/crow_indices_copy.h +// Parsed from ATen/ops/dropout.h // #pragma once @@ -40257,21 +25704,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::crow_indices_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor crow_indices_copy(@Const @ByRef Tensor self); +// aten::dropout(Tensor input, float p, bool train) -> Tensor +@Namespace("at") public static native @ByVal Tensor dropout(@Const @ByRef Tensor input, double p, @Cast("bool") boolean train); -// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor crow_indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor crow_indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor dropout_(@ByRef Tensor self, double p, @Cast("bool") boolean train); -// Parsed from ATen/ops/ctc_loss.h +// Parsed from ATen/ops/dsplit.h // #pragma once @@ -40292,23 +25737,20 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_lengths, @ByVal @Cast("c10::ArrayRef*") LongArrayRef target_lengths); -@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_lengths, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
target_lengths); +// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); -// aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths, @Cast("int64_t") long blank/*=0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean zero_infinity/*=false*/); -@Namespace("at") public static native @ByVal Tensor ctc_loss(@Const @ByRef Tensor log_probs, @Const @ByRef Tensor targets, @Const @ByRef Tensor input_lengths, @Const @ByRef Tensor target_lengths); +// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); -// Parsed from ATen/ops/cudnn_affine_grid_generator.h +// Parsed from ATen/ops/dstack.h // #pragma once @@ -40329,21 +25771,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid -@Namespace("at") public static native @ByVal Tensor cudnn_affine_grid_generator(@Const @ByRef Tensor theta, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W); +// aten::dstack(Tensor[] tensors) -> Tensor +@Namespace("at") public static native @ByVal Tensor dstack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_affine_grid_generator_out(@ByRef Tensor out, @Const @ByRef Tensor theta, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W); -// aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_affine_grid_generator_outf(@Const @ByRef Tensor theta, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W, @ByRef Tensor out); +// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor dstack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) 
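// [Editor's illustration — not part of the patch.] dsplit returns a
// TensorVector of chunks along the third dimension; dstack is its inverse for
// equally sized chunks. Sketch (randn is an assumed generated factory):
//
//   Tensor t = torch.randn(2, 2, 4);
//   TensorVector parts = torch.dsplit(t, 2);   // two [2, 2, 2] views
//   // dstack takes an at::TensorList (TensorArrayRef); how a TensorVector is
//   // wrapped into one depends on the preset helpers in this version.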
+@Namespace("at") public static native @ByRef Tensor dstack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); -// Parsed from ATen/ops/cudnn_affine_grid_generator_backward.h +// Parsed from ATen/ops/einsum.h // #pragma once @@ -40364,21 +25806,18 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta -@Namespace("at") public static native @ByVal Tensor cudnn_affine_grid_generator_backward(@Const @ByRef Tensor grad, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W); -// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_affine_grid_generator_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W); -// aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_affine_grid_generator_backward_outf(@Const @ByRef Tensor grad, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long H, @Cast("int64_t") long W, @ByRef Tensor out); +// aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor einsum(@ByVal @Cast("c10::string_view*") Pointer equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional path); +@Namespace("at") public static native @ByVal Tensor einsum(@ByVal @Cast("c10::string_view*") Pointer equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +@Namespace("at") public static native @ByVal Tensor einsum(@ByVal @Cast("c10::string_view*") Pointer equation, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... path); -// Parsed from ATen/ops/cudnn_batch_norm.h +// Parsed from ATen/ops/elu.h // #pragma once @@ -40399,21 +25838,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple cudnn_batch_norm(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon); +// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor elu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar scale, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar input_scale); +@Namespace("at") public static native @ByRef Tensor elu_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor elu_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar alpha, @Const @ByRef Scalar scale, @Const @ByRef Scalar input_scale, @ByRef Tensor out); -// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cudnn_batch_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon); -// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cudnn_batch_norm_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3); +// aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor elu(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar scale, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar input_scale); +@Namespace("at") public static native @ByVal Tensor elu(@Const @ByRef Tensor self); + +// aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor elu_(@ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar scale, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar input_scale); +@Namespace("at") public static native @ByRef Tensor elu_(@ByRef Tensor self); -// Parsed from ATen/ops/cudnn_batch_norm_backward.h +// Parsed from ATen/ops/elu_backward.h // #pragma once @@ -40434,21 +25879,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple cudnn_batch_norm_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon, @Const @ByRef Tensor reserveSpace); +// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor elu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Scalar alpha, @Const @ByRef Scalar scale, @Const @ByRef Scalar input_scale, @Cast("bool") boolean is_result, @Const @ByRef Tensor self_or_result); +// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor elu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Scalar alpha, @Const @ByRef Scalar scale, @Const @ByRef Scalar input_scale, @Cast("bool") boolean is_result, @Const @ByRef Tensor self_or_result, @ByRef Tensor grad_input); -// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cudnn_batch_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon, @Const @ByRef Tensor reserveSpace); -// aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cudnn_batch_norm_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon, @Const @ByRef Tensor reserveSpace, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor +@Namespace("at") public static native @ByVal Tensor elu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Scalar alpha, @Const @ByRef Scalar scale, @Const @ByRef Scalar input_scale, @Cast("bool") boolean is_result, @Const @ByRef Tensor self_or_result); -// Parsed from ATen/ops/cudnn_convolution.h +// Parsed from ATen/ops/embedding.h // #pragma once @@ -40469,24 +25914,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor -@Namespace("at") public static native @ByVal Tensor cudnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByVal Tensor cudnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor embedding(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Cast("int64_t") long padding_idx/*=-1*/, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("bool") boolean sparse/*=false*/); +@Namespace("at") public static native @ByVal Tensor embedding(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices); -// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor cudnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -// aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); +// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor embedding_symint(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @ByVal(nullValue = "c10::SymInt(-1)") SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("bool") boolean sparse/*=false*/); +@Namespace("at") public static native @ByVal Tensor embedding_symint(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices); +// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor embedding_out(@ByRef Tensor out, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Cast("int64_t") long padding_idx/*=-1*/, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("bool") boolean sparse/*=false*/); +@Namespace("at") public static native @ByRef Tensor embedding_out(@ByRef Tensor out, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices); -// Parsed from ATen/ops/cudnn_convolution_add_relu.h + +// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor embedding_outf(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq, @Cast("bool") boolean sparse, @ByRef Tensor out); + + +// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor embedding_symint_out(@ByRef Tensor out, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @ByVal(nullValue = "c10::SymInt(-1)") SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("bool") boolean sparse/*=false*/); +@Namespace("at") public static native @ByRef Tensor embedding_symint_out(@ByRef Tensor out, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices); + + +// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor embedding_symint_outf(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @ByVal SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq, @Cast("bool") boolean sparse, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/embedding_backward.h // #pragma once @@ -40507,24 +25969,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor cudnn_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor cudnn_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor +@Namespace("at") public static native @ByVal Tensor embedding_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Cast("int64_t") long num_weights, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq, @Cast("bool") boolean sparse); -// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); -// aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_add_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); + +// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor +@Namespace("at") public static native @ByVal Tensor embedding_backward_symint(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @ByVal SymInt num_weights, @ByVal SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq, @Cast("bool") boolean sparse); -// Parsed from ATen/ops/cudnn_convolution_relu.h + +// Parsed from ATen/ops/embedding_bag.h // #pragma once @@ -40545,24 +26004,20 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor cudnn_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor cudnn_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +// aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T embedding_bag(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("int64_t") long mode/*=0*/, @Cast("bool") boolean sparse/*=false*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional per_sample_weights, @Cast("bool") boolean include_last_offset/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T embedding_bag(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets); -// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); -// aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
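// embedding_bag returns four tensors at once; the generated tuple wrapper
// T_TensorTensorTensorTensor_T is assumed here to expose get0()..get3()
// accessors, following the preset's tuple naming scheme:
import org.bytedeco.pytorch.Tensor;
import org.bytedeco.pytorch.T_TensorTensorTensorTensor_T;
import static org.bytedeco.pytorch.global.torch.*;

public class EmbeddingBagSketch {
    static Tensor pooled(Tensor weight, Tensor indices, Tensor offsets) {
        // Three-argument overload: mode=0 (sum pooling) and the remaining
        // flags take the schema defaults shown above.
        T_TensorTensorTensorTensor_T r = embedding_bag(weight, indices, offsets);
        return r.get0(); // pooled output; get1()..get3() carry the aux tensors
    }
}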
-@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T embedding_bag(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Cast("bool") boolean sparse, @Const @ByRef TensorOptional per_sample_weights, @Cast("bool") boolean include_last_offset, @ByVal LongOptional padding_idx); -// Parsed from ATen/ops/cudnn_convolution_transpose.h +// Parsed from ATen/ops/embedding_dense_backward.h // #pragma once @@ -40583,24 +26038,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor -@Namespace("at") public static native @ByVal Tensor cudnn_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByVal Tensor cudnn_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); +// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor +@Namespace("at") public static native @ByVal Tensor embedding_dense_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @Cast("int64_t") long num_weights, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq); -// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, 
int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32); -// aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor cudnn_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @Cast("bool") boolean allow_tf32, @ByRef Tensor out); +// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor +@Namespace("at") public static native @ByVal Tensor embedding_dense_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @ByVal SymInt num_weights, @ByVal SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq); +// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) 
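// The _symint variants accept symbolic sizes; a sketch assuming the generated
// SymInt class mirrors c10::SymInt's int64_t constructor:
import org.bytedeco.pytorch.SymInt;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class DenseBackwardSketch {
    static Tensor gradWeight(Tensor gradOutput, Tensor indices) {
        // num_weights=10, padding_idx=-1 (no padding row), no frequency scaling.
        return embedding_dense_backward_symint(gradOutput, indices,
                new SymInt(10), new SymInt(-1), false);
    }
}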
+@Namespace("at") public static native @ByRef Tensor embedding_dense_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @Cast("int64_t") long num_weights, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq); + -// Parsed from ATen/ops/cudnn_grid_sampler.h +// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor embedding_dense_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @Cast("int64_t") long num_weights, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq, @ByRef Tensor out); + + +// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor embedding_dense_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @ByVal SymInt num_weights, @ByVal SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq); + + +// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor embedding_dense_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @ByVal SymInt num_weights, @ByVal SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/embedding_renorm.h // #pragma once @@ -40621,21 +26089,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output -@Namespace("at") public static native @ByVal Tensor cudnn_grid_sampler(@Const @ByRef Tensor self, @Const @ByRef Tensor grid); +// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor embedding_renorm_(@ByRef Tensor self, @Const @ByRef Tensor indices, double max_norm, double norm_type); -// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_grid_sampler_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor grid); -// aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cudnn_grid_sampler_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grid, @ByRef Tensor out); +// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor embedding_renorm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, double max_norm, double norm_type); +// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor embedding_renorm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, double max_norm, double norm_type, @ByRef Tensor out); +// aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor +@Namespace("at") public static native @ByVal Tensor embedding_renorm(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, double max_norm, double norm_type); -// Parsed from ATen/ops/cudnn_grid_sampler_backward.h + +// Parsed from ATen/ops/embedding_sparse_backward.h // #pragma once @@ -40656,21 +26127,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid) -@Namespace("at") public static native @ByVal TensorTensorTuple cudnn_grid_sampler_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grid, @Const @ByRef Tensor grad_output); -// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cudnn_grid_sampler_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grid, @Const @ByRef Tensor grad_output); -// aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cudnn_grid_sampler_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grid, @Const @ByRef Tensor grad_output, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor +@Namespace("at") public static native @ByVal Tensor embedding_sparse_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Cast("int64_t") long num_weights, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq); -// Parsed from ATen/ops/cudnn_is_acceptable.h +// Parsed from ATen/ops/empty.h // #pragma once @@ -40691,16 +26157,73 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cudnn_is_acceptable(Tensor self) -> bool -@Namespace("at") public static native @Cast("bool") boolean cudnn_is_acceptable(@Const @ByRef Tensor self); +// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); + +// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); + + +// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? 
device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty_symint(@ByVal SymIntArrayRef size); + + +// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); + + +// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + + +// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal LongArrayRef size, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); + + +// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size); + + +// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_symint_outf(@ByVal SymIntArrayRef size, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); + + +// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
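// The plain long... overload above makes uninitialized allocation terse from
// Java; dtype/layout/device fall back to the schema defaults (typically a
// float CPU tensor):
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class EmptySketch {
    static Tensor scratch() {
        return empty(2, 3); // 2x3, storage left uninitialized
    }
}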
+@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); + -// Parsed from ATen/ops/cummax.h + +// Parsed from ATen/ops/empty_like.h // #pragma once @@ -40721,29 +26244,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple cummax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// #include -// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cummax_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cummax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor values, @ByRef Tensor indices); -// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple cummax(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty_like(@Const @ByRef Tensor self); +// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_like(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cummax_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cummax_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByRef Tensor values, @ByRef Tensor indices); +// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_like_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_like_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// Parsed from ATen/ops/cummaxmin_backward.h +// Parsed from ATen/ops/empty_quantized.h // #pragma once @@ -40764,16 +26283,31 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor cummaxmin_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Cast("int64_t") long dim); +// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor); +@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor); +// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); + +// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal LongArrayRef size, @Const @ByRef Tensor qtensor); +@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor); +// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_quantized_outf(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_quantized_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// Parsed from ATen/ops/cummin.h +// Parsed from ATen/ops/empty_strided.h // #pragma once @@ -40794,29 +26328,52 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple cummin(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cummin_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cummin_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor values, @ByRef Tensor indices); -// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple cummin(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cummin_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer cummin_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByRef Tensor values, @ByRef Tensor indices); + +// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor empty_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); +// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor empty_strided_symint(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// Parsed from ATen/ops/cumprod.h +// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_strided_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByRef Tensor empty_strided_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); + + +// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_strided_outf(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor empty_strided_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); + + +// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_strided_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); + + +// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor empty_strided_symint_outf(@ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/eq.h // #pragma once @@ -40837,33 +26394,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor cumprod(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor cumprod(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eq_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eq_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
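// empty_strided pairs each size with an explicit element stride; a sketch
// using the long[]/long... overload above to lay out a 2x3 tensor in
// column-major order (strides count elements, not bytes):
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class EmptyStridedSketch {
    static Tensor columnMajor() {
        return empty_strided(new long[]{2, 3}, new long[]{1, 2});
    }
}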
-@Namespace("at") public static native @ByRef Tensor cumprod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor cumprod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cumprod_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor eq(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor cumprod(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor cumprod(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eq_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eq_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cumprod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor cumprod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cumprod_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor eq(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/cumprod_backward.h +// Parsed from ATen/ops/equal.h // #pragma once @@ -40884,16 +26437,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor -@Namespace("at") public static native @ByVal Tensor cumprod_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input, @Cast("int64_t") long dim, @Const @ByRef Tensor output); +// aten::equal(Tensor self, Tensor other) -> bool +@Namespace("at") public static native @Cast("bool") boolean equal(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/cumsum.h +// Parsed from ATen/ops/erf.h // #pragma once @@ -40914,33 +26467,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::cumsum(Tensor self, int dim, *, ScalarType? 
dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor cumsum(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor cumsum(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cumsum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor cumsum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cumsum_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::erf(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor erf(@Const @ByRef Tensor self); -// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor cumsum(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor cumsum(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::erf_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor erf_(@ByRef Tensor self); -// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cumsum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor cumsum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor cumsum_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor erf_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
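// The unary out-variants let one destination be reused across calls; a sketch
// for erf, and the same shape applies to the erfc/erfinv/exp families that
// follow:
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ErfSketch {
    static Tensor erfInto(Tensor x) {
        Tensor buf = empty_like(x); // reusable destination
        return erf_out(buf, x);
    }
}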
+@Namespace("at") public static native @ByRef Tensor erf_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/cumulative_trapezoid.h +// Parsed from ATen/ops/erfc.h // #pragma once @@ -40961,21 +26505,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor -@Namespace("at") public static native @ByVal Tensor cumulative_trapezoid(@Const @ByRef Tensor y, @Const @ByRef Tensor x, @Cast("int64_t") long dim/*=-1*/); -@Namespace("at") public static native @ByVal Tensor cumulative_trapezoid(@Const @ByRef Tensor y, @Const @ByRef Tensor x); +// aten::erfc(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor erfc(@Const @ByRef Tensor self); -// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor -@Namespace("at") public static native @ByVal Tensor cumulative_trapezoid(@Const @ByRef Tensor y, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar dx, @Cast("int64_t") long dim/*=-1*/); -@Namespace("at") public static native @ByVal Tensor cumulative_trapezoid(@Const @ByRef Tensor y); +// aten::erfc_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor erfc_(@ByRef Tensor self); + +// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor erfc_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor erfc_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/data.h +// Parsed from ATen/ops/erfinv.h // #pragma once @@ -40996,14 +26543,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::erfinv(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor erfinv(@Const @ByRef Tensor self); + +// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor erfinv_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor erfinv_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/deg2rad.h +// Parsed from ATen/ops/exp.h // #pragma once @@ -41024,24 +26578,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::deg2rad(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor deg2rad(@Const @ByRef Tensor self); +// aten::exp(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor exp(@Const @ByRef Tensor self); -// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor deg2rad_(@ByRef Tensor self); +// aten::exp_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor exp_(@ByRef Tensor self); -// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor deg2rad_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor deg2rad_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor exp_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor exp_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/dense_dim.h +// Parsed from ATen/ops/exp2.h // #pragma once @@ -41062,14 +26616,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::exp2(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor exp2(@Const @ByRef Tensor self); +// aten::exp2_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor exp2_(@ByRef Tensor self); +// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor exp2_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor exp2_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/dequantize.h +// Parsed from ATen/ops/expand.h // #pragma once @@ -41090,29 +26654,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::dequantize.self(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor dequantize(@Const @ByRef Tensor self); - -// aten::dequantize.tensors(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dequantize(@ByVal TensorArrayRef tensors); +// #include -// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor dequantize_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor dequantize_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void dequantize_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef tensors); -// aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void dequantize_outf(@ByVal TensorArrayRef tensors, @ByVal TensorArrayRef out); -// Parsed from ATen/ops/det.h +// Parsed from ATen/ops/expand_as.h // #pragma once @@ -41133,16 +26682,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::det(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor det(@Const @ByRef Tensor self); -// Parsed from ATen/ops/detach.h +// Parsed from ATen/ops/expand_copy.h // #pragma once @@ -41163,19 +26710,46 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::detach(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor detach(@Const @ByRef Tensor self); +// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("bool") boolean implicit/*=false*/); +@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit/*=false*/); +@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -// aten::detach_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor detach_(@ByRef Tensor self); + +// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor expand_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @Cast("bool") boolean implicit/*=false*/); +@Namespace("at") public static native @ByVal Tensor expand_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size); +// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("bool") boolean implicit/*=false*/); +@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit/*=false*/); +@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + + +// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor expand_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("bool") boolean implicit, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor expand_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit, @ByRef Tensor out); + + +// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor expand_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @Cast("bool") boolean implicit/*=false*/); +@Namespace("at") public static native @ByRef Tensor expand_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size); + + +// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor expand_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @Cast("bool") boolean implicit, @ByRef Tensor out); -// Parsed from ATen/ops/detach_copy.h + + + +// Parsed from ATen/ops/expm1.h // #pragma once @@ -41196,21 +26770,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::detach_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor detach_copy(@Const @ByRef Tensor self); +// aten::expm1(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor expm1(@Const @ByRef Tensor self); -// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor detach_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor detach_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::expm1_(Tensor(a!) self) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor expm1_(@ByRef Tensor self); +// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor expm1_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor expm1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/diag.h + +// Parsed from ATen/ops/exponential.h // #pragma once @@ -41231,23 +26808,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diag_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/); -@Namespace("at") public static native @ByRef Tensor diag_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diag_outf(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal, @ByRef Tensor out); +// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor exponential_out(@ByRef Tensor out, @Const @ByRef Tensor self, double lambd/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor exponential_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor exponential_outf(@Const @ByRef Tensor self, double lambd, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::diag(Tensor self, int diagonal=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor diag(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/); -@Namespace("at") public static native @ByVal Tensor diag(@Const @ByRef Tensor self); +// aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor exponential(@Const @ByRef Tensor self, double lambd/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor exponential(@Const @ByRef Tensor self); -// Parsed from ATen/ops/diag_embed.h +// Parsed from ATen/ops/eye.h // #pragma once @@ -41268,23 +26845,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor -@Namespace("at") public static native @ByVal Tensor diag_embed(@Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=-2*/, @Cast("int64_t") long dim2/*=-1*/); -@Namespace("at") public static native @ByVal Tensor diag_embed(@Const @ByRef Tensor self); +// aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n); +// aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diag_embed_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=-2*/, @Cast("int64_t") long dim2/*=-1*/); -@Namespace("at") public static native @ByRef Tensor diag_embed_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diag_embed_outf(@Const @ByRef Tensor self, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); +// aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @Cast("int64_t") long m); +// aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eye_out(@ByRef Tensor out, @Cast("int64_t") long n); +// aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eye_outf(@Cast("int64_t") long n, @ByRef Tensor out); + +// aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor eye_out(@ByRef Tensor out, @Cast("int64_t") long n, @Cast("int64_t") long m); +// aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) 
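// Illustrative sketch for the eye factory just above; assuming the usual static import
// from org.bytedeco.pytorch.global.torch:
//
//   Tensor i3 = torch.eye(3);       // 3x3 identity with default TensorOptions
//   Tensor r  = torch.eye(2, 5);    // aten::eye.m, 2x5 rectangular identity
//   Tensor out = torch.eye(4);
//   torch.eye_out(out, 4);          // aten::eye.out, fills an existing tensor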
+@Namespace("at") public static native @ByRef Tensor eye_outf(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByRef Tensor out); -// Parsed from ATen/ops/diagflat.h +// Parsed from ATen/ops/fake_quantize_per_channel_affine.h // #pragma once @@ -41305,17 +26894,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::diagflat(Tensor self, int offset=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor diagflat(@Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/); -@Namespace("at") public static native @ByVal Tensor diagflat(@Const @ByRef Tensor self); +// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor +@Namespace("at") public static native @ByVal Tensor fake_quantize_per_channel_affine(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); -// Parsed from ATen/ops/diagonal.h +// Parsed from ATen/ops/fake_quantize_per_channel_affine_cachemask.h // #pragma once @@ -41336,21 +26924,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor diagonal(@Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/); -@Namespace("at") public static native @ByVal Tensor diagonal(@Const @ByRef Tensor self); +// aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask) +@Namespace("at") public static native @ByVal T_TensorTensor_T fake_quantize_per_channel_affine_cachemask(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); -// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor diagonal(@Const @ByRef Tensor self, @ByVal Dimname outdim, @ByVal Dimname dim1, @ByVal Dimname dim2, @Cast("int64_t") long offset/*=0*/); -@Namespace("at") public static native @ByVal Tensor diagonal(@Const @ByRef Tensor self, @ByVal Dimname outdim, @ByVal Dimname dim1, @ByVal Dimname dim2); +// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T fake_quantize_per_channel_affine_cachemask_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); +// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T fake_quantize_per_channel_affine_cachemask_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @ByRef Tensor out0, @ByRef Tensor out1); -// Parsed from ATen/ops/diagonal_backward.h +// Parsed from ATen/ops/fake_quantize_per_channel_affine_cachemask_backward.h // #pragma once @@ -41371,40 +26959,49 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor -@Namespace("at") public static native @ByVal Tensor diagonal_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); -@Namespace("at") public static native @ByVal Tensor diagonal_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); +// aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor +@Namespace("at") public static native @ByVal Tensor fake_quantize_per_channel_affine_cachemask_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor mask); -// aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor -@Namespace("at") public static native @ByVal Tensor diagonal_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); -// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diagonal_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); -@Namespace("at") public static native @ByRef Tensor diagonal_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); +// Parsed from ATen/ops/fake_quantize_per_tensor_affine.h + +// #pragma once +// @generated by torchgen/gen.py from Function.h -// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor diagonal_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor diagonal_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diagonal_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal SymIntRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2); + +// #include -// aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diagonal_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef input_sizes, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); +// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor +@Namespace("at") public static native @ByVal Tensor fake_quantize_per_tensor_affine(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); +// aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor +@Namespace("at") public static native @ByVal Tensor fake_quantize_per_tensor_affine(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); -// Parsed from ATen/ops/diagonal_copy.h +// Parsed from ATen/ops/fake_quantize_per_tensor_affine_cachemask.h // #pragma once @@ -41425,23 +27022,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor diagonal_copy(@Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/); -@Namespace("at") public static native @ByVal Tensor diagonal_copy(@Const @ByRef Tensor self); +// aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask) +@Namespace("at") public static native @ByVal T_TensorTensor_T fake_quantize_per_tensor_affine_cachemask(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); -// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor diagonal_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/); -@Namespace("at") public static native @ByRef Tensor diagonal_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diagonal_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); +// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T fake_quantize_per_tensor_affine_cachemask_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); +// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T fake_quantize_per_tensor_affine_cachemask_outf(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @ByRef Tensor out0, @ByRef Tensor out1); -// Parsed from ATen/ops/diagonal_scatter.h +// Parsed from ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward.h // #pragma once @@ -41462,23 +27057,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor diagonal_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/); -@Namespace("at") public static native @ByVal Tensor diagonal_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src); -// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diagonal_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=0*/, @Cast("int64_t") long dim2/*=1*/); -@Namespace("at") public static native @ByRef Tensor diagonal_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src); -// aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor diagonal_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long offset, @Cast("int64_t") long dim1, @Cast("int64_t") long dim2, @ByRef Tensor out); +// aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor +@Namespace("at") public static native @ByVal Tensor fake_quantize_per_tensor_affine_cachemask_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor mask); -// Parsed from ATen/ops/diff.h +// Parsed from ATen/ops/fbgemm_linear_fp16_weight.h // #pragma once @@ -41499,23 +27087,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor diff(@Const @ByRef Tensor self, @Cast("int64_t") long n/*=1*/, @Cast("int64_t") long dim/*=-1*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional prepend, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional append); -@Namespace("at") public static native @ByVal Tensor diff(@Const @ByRef Tensor self); -// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diff_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long n/*=1*/, @Cast("int64_t") long dim/*=-1*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional prepend, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional append); -@Namespace("at") public static native @ByRef Tensor diff_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor diff_outf(@Const @ByRef Tensor self, @Cast("int64_t") long n, @Cast("int64_t") long dim, @Const @ByRef TensorOptional prepend, @Const @ByRef TensorOptional append, @ByRef Tensor out); +// aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor +@Namespace("at") public static native @ByVal Tensor fbgemm_linear_fp16_weight(@Const @ByRef Tensor input, @Const @ByRef Tensor packed_weight, @Const @ByRef Tensor bias); -// Parsed from ATen/ops/digamma.h +// Parsed from ATen/ops/fbgemm_linear_fp16_weight_fp32_activation.h // #pragma once @@ -41536,21 +27117,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor digamma_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor digamma_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::digamma(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor digamma(@Const @ByRef Tensor self); +// aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor +@Namespace("at") public static native @ByVal Tensor fbgemm_linear_fp16_weight_fp32_activation(@Const @ByRef Tensor input, @Const @ByRef Tensor packed_weight, @Const @ByRef Tensor bias); -// Parsed from ATen/ops/dist.h +// Parsed from ATen/ops/fbgemm_linear_int8_weight.h // #pragma once @@ -41571,23 +27147,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor -@Namespace("at") public static native @ByVal Tensor dist(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); -@Namespace("at") public static native @ByVal Tensor dist(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor dist_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); -@Namespace("at") public static native @ByRef Tensor dist_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor dist_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar p, @ByRef Tensor out); +// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor +@Namespace("at") public static native @ByVal Tensor fbgemm_linear_int8_weight(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef Tensor packed, @Const @ByRef Tensor col_offsets, @Const @ByRef Scalar weight_scale, @Const @ByRef Scalar weight_zero_point, @Const @ByRef Tensor bias); -// Parsed from ATen/ops/div.h +// Parsed from ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h // #pragma once @@ -41608,45 +27177,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::div.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Tensor other); - -// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); - -// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor -@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); - -// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); -// aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode, @ByRef Tensor out); - -// aten::div.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Scalar other); - -// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor -@Namespace("at") public static native @ByVal Tensor div(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +// #include -// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor div_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); -// aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor div_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode, @ByRef Tensor out); +// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor +@Namespace("at") public static native @ByVal Tensor fbgemm_linear_int8_weight_fp32_activation(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef Tensor packed, @Const @ByRef Tensor col_offsets, @Const @ByRef Scalar weight_scale, @Const @ByRef Scalar weight_zero_point, @Const @ByRef Tensor bias); -// Parsed from ATen/ops/divide.h +// Parsed from ATen/ops/fbgemm_linear_quantize_weight.h // #pragma once @@ -41667,33 +27207,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor - -// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor divide_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor divide_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); - -// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor - -// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? 
rounding_mode) -> Tensor -@Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +// #include -// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor divide_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); -// aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor divide_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::optional*") Pointer rounding_mode, @ByRef Tensor out); -// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor -@Namespace("at") public static native @ByVal Tensor divide(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByVal @Cast("c10::optional*") Pointer rounding_mode); +// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int) +@Namespace("at") public static native @ByVal T_TensorTensorDoubleLong_T fbgemm_linear_quantize_weight(@Const @ByRef Tensor input); -// Parsed from ATen/ops/dot.h +// Parsed from ATen/ops/fbgemm_pack_gemm_matrix_fp16.h // #pragma once @@ -41714,21 +27237,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::dot(Tensor self, Tensor tensor) -> Tensor -@Namespace("at") public static native @ByVal Tensor dot(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor); -// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor dot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor tensor); -// aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor dot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor, @ByRef Tensor out); +// aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor +@Namespace("at") public static native @ByVal Tensor fbgemm_pack_gemm_matrix_fp16(@Const @ByRef Tensor input); -// Parsed from ATen/ops/dropout.h +// Parsed from ATen/ops/fbgemm_pack_quantized_matrix.h // #pragma once @@ -41749,19 +27267,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::dropout(Tensor input, float p, bool train) -> Tensor -@Namespace("at") public static native @ByVal Tensor dropout(@Const @ByRef Tensor input, double p, @Cast("bool") boolean train); +// aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor +@Namespace("at") public static native @ByVal Tensor fbgemm_pack_quantized_matrix(@Const @ByRef Tensor input); -// aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) 
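// Illustrative sketch for the fbgemm bindings in this region. These paths are only
// usable on builds with FBGEMM support (x86); both calls below are bound earlier in
// this region, and the static import from org.bytedeco.pytorch.global.torch is assumed:
//
//   Tensor packed = torch.fbgemm_pack_gemm_matrix_fp16(weight);
//   Tensor y = torch.fbgemm_linear_fp16_weight(input, packed, bias);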
-@Namespace("at") public static native @ByRef Tensor dropout_(@ByRef Tensor self, double p, @Cast("bool") boolean train); +// aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor +@Namespace("at") public static native @ByVal Tensor fbgemm_pack_quantized_matrix(@Const @ByRef Tensor input, @Cast("int64_t") long K, @Cast("int64_t") long N); -// Parsed from ATen/ops/dsplit.h +// Parsed from ATen/ops/feature_alpha_dropout.h // #pragma once @@ -41782,20 +27300,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); +// aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor +@Namespace("at") public static native @ByVal Tensor feature_alpha_dropout(@Const @ByRef Tensor input, double p, @Cast("bool") boolean train); -// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector dsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); +// aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor feature_alpha_dropout_(@ByRef Tensor self, double p, @Cast("bool") boolean train); -// Parsed from ATen/ops/dstack.h +// Parsed from ATen/ops/feature_dropout.h // #pragma once @@ -41816,21 +27333,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::dstack(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor dstack(@ByVal TensorArrayRef tensors); +// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor +@Namespace("at") public static native @ByVal Tensor feature_dropout(@Const @ByRef Tensor input, double p, @Cast("bool") boolean train); -// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor dstack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor dstack_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); +// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor feature_dropout_(@ByRef Tensor self, double p, @Cast("bool") boolean train); -// Parsed from ATen/ops/einsum.h +// Parsed from ATen/ops/fft_fft.h // #pragma once @@ -41851,18 +27366,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::einsum(str equation, Tensor[] tensors, *, int[]? 
path=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor einsum(@ByVal @Cast("c10::string_view*") Pointer equation, @ByVal TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional path); -@Namespace("at") public static native @ByVal Tensor einsum(@ByVal @Cast("c10::string_view*") Pointer equation, @ByVal TensorArrayRef tensors); -@Namespace("at") public static native @ByVal Tensor einsum(@ByVal @Cast("c10::string_view*") Pointer equation, @ByVal TensorArrayRef tensors, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... path); +// aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_fft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_fft(@Const @ByRef Tensor self); +// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_fft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_fft_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_fft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/elu.h + +// Parsed from ATen/ops/fft_fft2.h // #pragma once @@ -41883,27 +27403,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor elu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar scale, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar input_scale); -@Namespace("at") public static native @ByRef Tensor elu_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor elu_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar alpha, @Const @ByRef Scalar scale, @Const @ByRef Scalar input_scale, @ByRef Tensor out); -// aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor elu(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar scale, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar input_scale); -@Namespace("at") public static native @ByVal Tensor elu(@Const @ByRef Tensor self); +// aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor elu_(@ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar scale, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar input_scale); -@Namespace("at") public static native @ByRef Tensor elu_(@ByRef Tensor self); +// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/elu_backward.h +// Parsed from ATen/ops/fft_fftfreq.h // #pragma once @@ -41924,21 +27443,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor elu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Scalar alpha, @Const @ByRef Scalar scale, @Const @ByRef Scalar input_scale, @Cast("bool") boolean is_result, @Const @ByRef Tensor self_or_result); -// aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor elu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Scalar alpha, @Const @ByRef Scalar scale, @Const @ByRef Scalar input_scale, @Cast("bool") boolean is_result, @Const @ByRef Tensor self_or_result, @ByRef Tensor grad_input); +// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_fftfreq(@Cast("int64_t") long n, double d/*=1.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor fft_fftfreq(@Cast("int64_t") long n); +// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_fftfreq(@Cast("int64_t") long n, double d, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor -@Namespace("at") public static native @ByVal Tensor elu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Scalar alpha, @Const @ByRef Scalar scale, @Const @ByRef Scalar input_scale, @Cast("bool") boolean is_result, @Const @ByRef Tensor self_or_result); +// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_fftfreq_out(@ByRef Tensor out, @Cast("int64_t") long n, double d/*=1.0*/); +@Namespace("at") public static native @ByRef Tensor fft_fftfreq_out(@ByRef Tensor out, @Cast("int64_t") long n); +// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_fftfreq_outf(@Cast("int64_t") long n, double d, @ByRef Tensor out); -// Parsed from ATen/ops/embedding.h +// Parsed from ATen/ops/fft_fftn.h // #pragma once @@ -41959,41 +27482,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor embedding(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Cast("int64_t") long padding_idx/*=-1*/, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("bool") boolean sparse/*=false*/); -@Namespace("at") public static native @ByVal Tensor embedding(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices); - - -// aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor embedding_symint(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @ByVal(nullValue = "c10::SymInt(-1)") SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("bool") boolean sparse/*=false*/); -@Namespace("at") public static native @ByVal Tensor embedding_symint(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices); - - -// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor embedding_out(@ByRef Tensor out, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Cast("int64_t") long padding_idx/*=-1*/, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("bool") boolean sparse/*=false*/); -@Namespace("at") public static native @ByRef Tensor embedding_out(@ByRef Tensor out, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices); - - -// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor embedding_outf(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq, @Cast("bool") boolean sparse, @ByRef Tensor out); - - -// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor embedding_symint_out(@ByRef Tensor out, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @ByVal(nullValue = "c10::SymInt(-1)") SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("bool") boolean sparse/*=false*/); -@Namespace("at") public static native @ByRef Tensor embedding_symint_out(@ByRef Tensor out, @Const @ByRef Tensor weight, @Const @ByRef Tensor indices); +// #include -// aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor embedding_symint_outf(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @ByVal SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq, @Cast("bool") boolean sparse, @ByRef Tensor out); +// aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
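// Illustrative sketch: the single-argument fft_fftn overload above covers the common
// case where s, dim, and norm are all left at their None defaults:
//
//   Tensor spectrum = torch.fft_fftn(x);    // n-dimensional FFT over all dims of x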
+@Namespace("at") public static native @ByRef Tensor fft_fftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_fftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/embedding_backward.h +// Parsed from ATen/ops/fft_fftshift.h // #pragma once @@ -42014,21 +27522,18 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor -@Namespace("at") public static native @ByVal Tensor embedding_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Cast("int64_t") long num_weights, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq, @Cast("bool") boolean sparse); - +// #include -// aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor -@Namespace("at") public static native @ByVal Tensor embedding_backward_symint(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @ByVal SymInt num_weights, @ByVal SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq, @Cast("bool") boolean sparse); +// aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_fftshift(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim); +@Namespace("at") public static native @ByVal Tensor fft_fftshift(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_fftshift(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// Parsed from ATen/ops/embedding_bag.h +// Parsed from ATen/ops/fft_hfft.h // #pragma once @@ -42049,20 +27554,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple embedding_bag(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Cast("bool") boolean scale_grad_by_freq/*=false*/, @Cast("int64_t") long mode/*=0*/, @Cast("bool") boolean sparse/*=false*/, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional per_sample_weights, @Cast("bool") boolean include_last_offset/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple embedding_bag(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets); +// aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_hfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft(@Const @ByRef Tensor self); -// aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple embedding_bag(@Const @ByRef Tensor weight, @Const @ByRef Tensor indices, @Const @ByRef Tensor offsets, @Cast("bool") boolean scale_grad_by_freq, @Cast("int64_t") long mode, @Cast("bool") boolean sparse, @Const @ByRef TensorOptional per_sample_weights, @Cast("bool") boolean include_last_offset, @ByVal LongOptional padding_idx); +// aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_hfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_hfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_hfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/embedding_dense_backward.h +// Parsed from ATen/ops/fft_hfft2.h // #pragma once @@ -42083,37 +27591,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor -@Namespace("at") public static native @ByVal Tensor embedding_dense_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @Cast("int64_t") long num_weights, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq); - - -// aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor -@Namespace("at") public static native @ByVal Tensor embedding_dense_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @ByVal SymInt num_weights, @ByVal SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq); - - -// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor embedding_dense_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @Cast("int64_t") long num_weights, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq); - - -// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor embedding_dense_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @Cast("int64_t") long num_weights, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq, @ByRef Tensor out); - - -// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor embedding_dense_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @ByVal SymInt num_weights, @ByVal SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq); +// #include -// aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor embedding_dense_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor indices, @ByVal SymInt num_weights, @ByVal SymInt padding_idx, @Cast("bool") boolean scale_grad_by_freq, @ByRef Tensor out); +// aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -// Parsed from ATen/ops/embedding_renorm.h +// Parsed from ATen/ops/fft_hfftn.h // #pragma once @@ -42134,24 +27631,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor embedding_renorm_(@ByRef Tensor self, @Const @ByRef Tensor indices, double max_norm, double norm_type); -// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor embedding_renorm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, double max_norm, double norm_type); -// aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor embedding_renorm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, double max_norm, double norm_type, @ByRef Tensor out); +// aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor -@Namespace("at") public static native @ByVal Tensor embedding_renorm(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, double max_norm, double norm_type); +// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -// Parsed from ATen/ops/embedding_sparse_backward.h +// Parsed from ATen/ops/fft_ifft.h // #pragma once @@ -42172,16 +27671,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor -@Namespace("at") public static native @ByVal Tensor embedding_sparse_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor indices, @Cast("int64_t") long num_weights, @Cast("int64_t") long padding_idx, @Cast("bool") boolean scale_grad_by_freq); +// aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ifft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft(@Const @ByRef Tensor self); + +// aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_ifft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_ifft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/empty.h +// Parsed from ATen/ops/fft_ifft2.h // #pragma once @@ -42202,73 +27708,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -// aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); - -// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); - - -// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty_symint(@ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty_symint(@ByVal SymIntRef size); - - -// aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty_symint(@ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); - - -// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); - - -// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_symint_out(@ByRef Tensor out, @ByVal SymIntRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_symint_out(@ByRef Tensor out, @ByVal SymIntRef size); - +// #include -// aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor empty_symint_outf(@ByVal SymIntRef size, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +// aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -// aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor empty_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/empty_like.h +// Parsed from ATen/ops/fft_ifftn.h // #pragma once @@ -42289,25 +27748,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty_like(@Const @ByRef Tensor self); -// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty_like(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); +// aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_like_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_like_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_ifftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_ifftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/empty_quantized.h +// Parsed from ATen/ops/fft_ifftshift.h // #pragma once @@ -42328,31 +27788,18 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor qtensor); -@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor); -// aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor qtensor); -@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor empty_quantized_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor); -// aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_quantized_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor empty_quantized_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +// aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ifftshift(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim); +@Namespace("at") public static native @ByVal Tensor fft_ifftshift(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_ifftshift(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// Parsed from ATen/ops/empty_strided.h +// Parsed from ATen/ops/fft_ihfft.h // #pragma once @@ -42373,52 +27820,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); - - -// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - - -// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty_strided_symint(@ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor empty_strided_symint(@ByVal SymIntRef size, @ByVal SymIntRef stride); - - -// aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor empty_strided_symint(@ByVal SymIntRef size, @ByVal SymIntRef stride, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - - -// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_strided_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor empty_strided_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); - - -// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_strided_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor empty_strided_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); - - -// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_strided_symint_out(@ByRef Tensor out, @ByVal SymIntRef size, @ByVal SymIntRef stride); +// #include -// aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor empty_strided_symint_outf(@ByVal SymIntRef size, @ByVal SymIntRef stride, @ByRef Tensor out); +// aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ihfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft(@Const @ByRef Tensor self); +// aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_ihfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_ihfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_ihfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/eq.h +// Parsed from ATen/ops/fft_ihfft2.h // #pragma once @@ -42439,29 +27857,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor eq_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor eq_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// #include -// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor eq(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor eq_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor eq_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor eq(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -// Parsed from ATen/ops/equal.h +// Parsed from ATen/ops/fft_ihfftn.h // #pragma once @@ -42482,16 +27897,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::equal(Tensor self, Tensor other) -> bool -@Namespace("at") public static native @Cast("bool") boolean equal(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + +// aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -// Parsed from ATen/ops/erf.h +// Parsed from ATen/ops/fft_irfft.h // #pragma once @@ -42512,24 +27937,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::erf(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor erf(@Const @ByRef Tensor self); -// aten::erf_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor erf_(@ByRef Tensor self); +// aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_irfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft(@Const @ByRef Tensor self); -// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor erf_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor erf_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_irfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_irfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/erfc.h +// Parsed from ATen/ops/fft_irfft2.h // #pragma once @@ -42550,24 +27974,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::erfc(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor erfc(@Const @ByRef Tensor self); -// aten::erfc_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor erfc_(@ByRef Tensor self); +// aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor erfc_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor erfc_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/erfinv.h +// Parsed from ATen/ops/fft_irfftn.h // #pragma once @@ -42588,21 +28014,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::erfinv(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor erfinv(@Const @ByRef Tensor self); +// aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor erfinv_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor erfinv_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_irfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_irfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/exp.h +// Parsed from ATen/ops/fft_rfft.h // #pragma once @@ -42623,24 +28054,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::exp(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor exp(@Const @ByRef Tensor self); -// aten::exp_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor exp_(@ByRef Tensor self); +// aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_rfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft(@Const @ByRef Tensor self); -// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor exp_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor exp_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_rfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_rfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/exp2.h +// Parsed from ATen/ops/fft_rfft2.h // #pragma once @@ -42661,24 +28091,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::exp2(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor exp2(@Const @ByRef Tensor self); -// aten::exp2_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor exp2_(@ByRef Tensor self); +// aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor exp2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor exp2_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/expand.h +// Parsed from ATen/ops/fft_rfftfreq.h // #pragma once @@ -42699,14 +28131,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_rfftfreq(@Cast("int64_t") long n, double d/*=1.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor fft_rfftfreq(@Cast("int64_t") long n); +// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_rfftfreq(@Cast("int64_t") long n, double d, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_rfftfreq_out(@ByRef Tensor out, @Cast("int64_t") long n, double d/*=1.0*/); +@Namespace("at") public static native @ByRef Tensor fft_rfftfreq_out(@ByRef Tensor out, @Cast("int64_t") long n); +// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_rfftfreq_outf(@Cast("int64_t") long n, double d, @ByRef Tensor out); -// Parsed from ATen/ops/expand_as.h +// Parsed from ATen/ops/fft_rfftn.h // #pragma once @@ -42727,14 +28170,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); + +// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fft_rfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor fft_rfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -// Parsed from ATen/ops/expand_copy.h +// Parsed from ATen/ops/fill.h // #pragma once @@ -42755,46 +28210,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("bool") boolean implicit/*=false*/); -@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit/*=false*/); -@Namespace("at") public static native @ByVal Tensor expand_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor expand_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size, @Cast("bool") boolean implicit/*=false*/); -@Namespace("at") public static native @ByVal Tensor expand_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size); - - -// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("bool") boolean implicit/*=false*/); -@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit/*=false*/); -@Namespace("at") public static native @ByRef Tensor expand_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// #include -// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor expand_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("bool") boolean implicit, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor expand_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("bool") boolean implicit, @ByRef Tensor out); +// aten::fill.Scalar(Tensor self, Scalar value) -> Tensor +@Namespace("at") public static native @ByVal @Name("fill") Tensor _fill(@Const @ByRef Tensor self, @Const @ByRef Scalar value); +// aten::fill.Tensor(Tensor self, Tensor value) -> Tensor +@Namespace("at") public static native @ByVal @Name("fill") Tensor _fill(@Const @ByRef Tensor self, @Const @ByRef Tensor value); -// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor expand_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size, @Cast("bool") boolean implicit/*=false*/); -@Namespace("at") public static native @ByRef Tensor expand_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size); +// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fill_(@ByRef Tensor self, @Const @ByRef Scalar value); +// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fill_(@ByRef Tensor self, @Const @ByRef Tensor value); -// aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor expand_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef size, @Cast("bool") boolean implicit, @ByRef Tensor out); +// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar value); +// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fill_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar value, @ByRef Tensor out); +// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor value); +// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fill_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor value, @ByRef Tensor out); -// Parsed from ATen/ops/expm1.h +// Parsed from ATen/ops/fill_diagonal.h // #pragma once @@ -42815,24 +28259,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::expm1(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor expm1(@Const @ByRef Tensor self); +// #include -// aten::expm1_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor expm1_(@ByRef Tensor self); -// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor expm1_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor expm1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/exponential.h +// Parsed from ATen/ops/fix.h // #pragma once @@ -42853,23 +28287,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor exponential_out(@ByRef Tensor out, @Const @ByRef Tensor self, double lambd/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor exponential_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor exponential_outf(@Const @ByRef Tensor self, double lambd, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::fix(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor fix(@Const @ByRef Tensor self); -// aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor exponential(@Const @ByRef Tensor self, double lambd/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor exponential(@Const @ByRef Tensor self); +// aten::fix_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fix_(@ByRef Tensor self); + +// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fix_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fix_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/eye.h +// Parsed from ATen/ops/flatten.h // #pragma once @@ -42890,35 +28325,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n); -// aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor flatten(@Const @ByRef Tensor self, @Cast("int64_t") long start_dim/*=0*/, @Cast("int64_t") long end_dim/*=-1*/); +@Namespace("at") public static native @ByVal Tensor flatten(@Const @ByRef Tensor self); -// aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @Cast("int64_t") long m); -// aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor eye(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor flatten(@Const @ByRef Tensor self, @Cast("int64_t") long start_dim, @Cast("int64_t") long end_dim, @ByVal Dimname out_dim); -// aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor eye_out(@ByRef Tensor out, @Cast("int64_t") long n); -// aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor eye_outf(@Cast("int64_t") long n, @ByRef Tensor out); +// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor flatten(@Const @ByRef Tensor self, @ByVal Dimname start_dim, @ByVal Dimname end_dim, @ByVal Dimname out_dim); -// aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor eye_out(@ByRef Tensor out, @Cast("int64_t") long n, @Cast("int64_t") long m); -// aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor eye_outf(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByRef Tensor out); +// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor flatten(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dims, @ByVal Dimname out_dim); -// Parsed from ATen/ops/fake_quantize_per_channel_affine.h +// Parsed from ATen/ops/flatten_dense_tensors.h // #pragma once @@ -42939,16 +28365,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor -@Namespace("at") public static native @ByVal Tensor fake_quantize_per_channel_affine(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); +// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor +@Namespace("at") public static native @ByVal Tensor flatten_dense_tensors(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// Parsed from ATen/ops/fake_quantize_per_channel_affine_cachemask.h +// Parsed from ATen/ops/flip.h // #pragma once @@ -42969,21 +28395,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask) -@Namespace("at") public static native @ByVal TensorTensorTuple fake_quantize_per_channel_affine_cachemask(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); +// aten::flip(Tensor self, int[] dims) -> Tensor +@Namespace("at") public static native @ByVal Tensor flip(@Const @ByRef Tensor self, @ByVal LongArrayRef dims); +@Namespace("at") public static native @ByVal Tensor flip(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fake_quantize_per_channel_affine_cachemask_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); -// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fake_quantize_per_channel_affine_cachemask_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long axis, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor flip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dims); +@Namespace("at") public static native @ByRef Tensor flip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor flip_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor flip_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); -// Parsed from ATen/ops/fake_quantize_per_channel_affine_cachemask_backward.h +// Parsed from ATen/ops/fliplr.h // #pragma once @@ -43004,16 +28433,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor -@Namespace("at") public static native @ByVal Tensor fake_quantize_per_channel_affine_cachemask_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor mask); +// aten::fliplr(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor fliplr(@Const @ByRef Tensor self); -// Parsed from ATen/ops/fake_quantize_per_tensor_affine.h +// Parsed from ATen/ops/flipud.h // #pragma once @@ -43034,19 +28463,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor -@Namespace("at") public static native @ByVal Tensor fake_quantize_per_tensor_affine(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); -// aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor -@Namespace("at") public static native @ByVal Tensor fake_quantize_per_tensor_affine(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); +// aten::flipud(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor flipud(@Const @ByRef Tensor self); -// Parsed from ATen/ops/fake_quantize_per_tensor_affine_cachemask.h +// Parsed from ATen/ops/float_power.h // #pragma once @@ -43067,21 +28493,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask) -@Namespace("at") public static native @ByVal TensorTensorTuple fake_quantize_per_tensor_affine_cachemask(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); +// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor float_power_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor exponent); +// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor float_power_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor exponent, @ByRef Tensor out); -// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fake_quantize_per_tensor_affine_cachemask_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max); -// aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fake_quantize_per_tensor_affine_cachemask_outf(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +@Namespace("at") public static native @ByVal Tensor float_power(@Const @ByRef Tensor self, @Const @ByRef Tensor exponent); + +// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor float_power_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor exponent); +// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor float_power_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor exponent, @ByRef Tensor out); + +// aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor +@Namespace("at") public static native @ByVal Tensor float_power(@Const @ByRef Scalar self, @Const @ByRef Tensor exponent); + +// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor float_power_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar exponent); +// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor float_power_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar exponent, @ByRef Tensor out); +// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor +@Namespace("at") public static native @ByVal Tensor float_power(@Const @ByRef Tensor self, @Const @ByRef Scalar exponent); -// Parsed from ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward.h + +// Parsed from ATen/ops/floor.h // #pragma once @@ -43102,16 +28544,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor -@Namespace("at") public static native @ByVal Tensor fake_quantize_per_tensor_affine_cachemask_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor mask); +// aten::floor(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor floor(@Const @ByRef Tensor self); + +// aten::floor_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor floor_(@ByRef Tensor self); + +// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor floor_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor floor_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/fbgemm_linear_fp16_weight.h +// Parsed from ATen/ops/floor_divide.h // #pragma once @@ -43132,16 +28582,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor -@Namespace("at") public static native @ByVal Tensor fbgemm_linear_fp16_weight(@Const @ByRef Tensor input, @Const @ByRef Tensor packed_weight, @Const @ByRef Tensor bias); +// aten::floor_divide(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor floor_divide(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor floor_divide_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor floor_divide_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor floor_divide(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// Parsed from ATen/ops/fbgemm_linear_fp16_weight_fp32_activation.h + +// Parsed from ATen/ops/fmax.h // #pragma once @@ -43162,16 +28620,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor -@Namespace("at") public static native @ByVal Tensor fbgemm_linear_fp16_weight_fp32_activation(@Const @ByRef Tensor input, @Const @ByRef Tensor packed_weight, @Const @ByRef Tensor bias); +// aten::fmax(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor fmax(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fmax_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/fbgemm_linear_int8_weight.h +// Parsed from ATen/ops/fmin.h // #pragma once @@ -43192,16 +28655,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor -@Namespace("at") public static native @ByVal Tensor fbgemm_linear_int8_weight(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef Tensor packed, @Const @ByRef Tensor col_offsets, @Const @ByRef Scalar weight_scale, @Const @ByRef Scalar weight_zero_point, @Const @ByRef Tensor bias); +// aten::fmin(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor fmin(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fmin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fmin_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h +// Parsed from ATen/ops/fmod.h // #pragma once @@ -43222,16 +28690,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor -@Namespace("at") public static native @ByVal Tensor fbgemm_linear_int8_weight_fp32_activation(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef Tensor packed, @Const @ByRef Tensor col_offsets, @Const @ByRef Scalar weight_scale, @Const @ByRef Scalar weight_zero_point, @Const @ByRef Tensor bias); +// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fmod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fmod_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); + +// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor fmod(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fmod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fmod_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor fmod(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/fbgemm_linear_quantize_weight.h +// Parsed from ATen/ops/frac.h // #pragma once @@ -43252,16 +28733,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int) -@Namespace("at") public static native @ByVal TensorTensorDoubleLongTuple fbgemm_linear_quantize_weight(@Const @ByRef Tensor input); +// aten::frac(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor frac(@Const @ByRef Tensor self); + +// aten::frac_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor frac_(@ByRef Tensor self); +// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor frac_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor frac_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/fbgemm_pack_gemm_matrix_fp16.h + +// Parsed from ATen/ops/fractional_max_pool2d.h // #pragma once @@ -43282,16 +28771,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor -@Namespace("at") public static native @ByVal Tensor fbgemm_pack_gemm_matrix_fp16(@Const @ByRef Tensor input); +// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); +// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); + +// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); -// Parsed from ATen/ops/fbgemm_pack_quantized_matrix.h +// Parsed from ATen/ops/fractional_max_pool2d_backward.h // #pragma once @@ -43312,19 +28809,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor -@Namespace("at") public static native @ByVal Tensor fbgemm_pack_quantized_matrix(@Const @ByRef Tensor input); +// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); +// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
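// Usage sketch: fractional_max_pool2d returns a (values, indices) pair mapped
// to T_TensorTensor_T. The aten::rand varargs binding and the generated
// get0()/get1() tuple accessors are assumptions about the rest of the presets.
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class FractionalPoolSketch {
    public static void main(String[] args) {
        Tensor x = rand(1, 1, 8, 8);       // (N, C, H, W) input
        Tensor samples = rand(1, 1, 2);    // random_samples in [0, 1), shape (N, C, 2)
        T_TensorTensor_T r = fractional_max_pool2d(
                x, new long[]{2, 2}, new long[]{4, 4}, samples);
        Tensor pooled = r.get0();
        Tensor indices = r.get1();
    }
}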
+@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -// aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor -@Namespace("at") public static native @ByVal Tensor fbgemm_pack_quantized_matrix(@Const @ByRef Tensor input, @Cast("int64_t") long K, @Cast("int64_t") long N); +// aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor +@Namespace("at") public static native @ByVal Tensor fractional_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByVal Tensor fractional_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); -// Parsed from ATen/ops/feature_alpha_dropout.h +// Parsed from ATen/ops/fractional_max_pool3d.h // #pragma once @@ -43345,19 +28847,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor -@Namespace("at") public static native @ByVal Tensor feature_alpha_dropout(@Const @ByRef Tensor input, double p, @Cast("bool") boolean train); +// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); +// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); -// aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor feature_alpha_dropout_(@ByRef Tensor self, double p, @Cast("bool") boolean train); +// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor random_samples); +@Namespace("at") public static native @ByVal T_TensorTensor_T fractional_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); -// Parsed from ATen/ops/feature_dropout.h +// Parsed from ATen/ops/fractional_max_pool3d_backward.h // #pragma once @@ -43378,19 +28885,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor -@Namespace("at") public static native @ByVal Tensor feature_dropout(@Const @ByRef Tensor input, double p, @Cast("bool") boolean train); +// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); +// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor feature_dropout_(@ByRef Tensor self, double p, @Cast("bool") boolean train); +// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor +@Namespace("at") public static native @ByVal Tensor fractional_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef output_size, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByVal Tensor fractional_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); -// Parsed from ATen/ops/fft_fft.h +// Parsed from ATen/ops/frexp.h // #pragma once @@ -43411,23 +28923,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_fft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_fft(@Const @ByRef Tensor self); +// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent) +@Namespace("at") public static native @ByVal T_TensorTensor_T frexp(@Const @ByRef Tensor self); -// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_fft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_fft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_fft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) 
exponent) +@Namespace("at") public static native @ByVal T_TensorTensor_T frexp_out(@ByRef Tensor mantissa, @ByRef Tensor exponent, @Const @ByRef Tensor self); +// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent) +@Namespace("at") public static native @ByVal T_TensorTensor_T frexp_outf(@Const @ByRef Tensor self, @ByRef Tensor mantissa, @ByRef Tensor exponent); -// Parsed from ATen/ops/fft_fft2.h +// Parsed from ATen/ops/frobenius_norm.h // #pragma once @@ -43448,65 +28958,293 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_fft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_fft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
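// Usage sketch: frexp decomposes each element into mantissa and exponent,
// returned as a T_TensorTensor_T pair; frobenius_norm takes a long... varargs
// overload for its dim parameter. The get0()/get1() accessors are assumed from
// the generated tuple wrappers.
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class FrexpSketch {
    public static void main(String[] args) {
        Tensor t = fft_rfftfreq(6);
        T_TensorTensor_T me = frexp(t);           // mantissas in [0.5, 1), integer exponents
        Tensor mantissa = me.get0();
        Tensor exponent = me.get1();
        Tensor n = frobenius_norm(eye(4), 0, 1);  // sqrt of the sum of squares over dims {0,1} -> 2.0
    }
}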
-@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_fft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor frobenius_norm_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor frobenius_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// Parsed from ATen/ops/fft_fftfreq.h +// Parsed from ATen/ops/from_blob.h // #pragma once +// #include -// @generated by torchgen/gen.py from Function.h +@Namespace("at::detail") public static native void noopDelete(Pointer arg0); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../TensorMaker.java -// #include +@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal LongArrayRef sizes); +@Namespace("at") public static native @ByVal @NoException(true) TensorMaker for_blob(Pointer data, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); -// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_fftfreq(@Cast("int64_t") long n, double d/*=1.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor fft_fftfreq(@Cast("int64_t") long n); -// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_fftfreq(@Cast("int64_t") long n, double d, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Const @ByRef PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Const @ByRef PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByRef @Cast("void(*)(void*)") Pointer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByRef @Cast("void(*)(void*)") Pointer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @ByRef @Cast("void(*)(void*)") long deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @ByRef @Cast("void(*)(void*)") long deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Const @ByRef PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Const @ByRef PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @ByRef @Cast("void(*)(void*)") Pointer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @ByRef @Cast("void(*)(void*)") Pointer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) 
@StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByRef @Cast("void(*)(void*)") long deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByRef @Cast("void(*)(void*)") long deleter); -// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_fftfreq_out(@ByRef Tensor out, @Cast("int64_t") long n, double d/*=1.0*/); -@Namespace("at") public static native @ByRef Tensor fft_fftfreq_out(@ByRef Tensor out, @Cast("int64_t") long n); -// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_fftfreq_outf(@Cast("int64_t") long n, double d, @ByRef Tensor out); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("int64_t") long storage_offset, + @Const @ByRef PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("int64_t") long storage_offset, + @Const @ByRef PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Cast("int64_t") long storage_offset, + @ByRef @Cast("void(*)(void*)") Pointer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Cast("int64_t") long storage_offset, + @ByRef @Cast("void(*)(void*)") Pointer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("int64_t") long storage_offset, + @ByRef @Cast("void(*)(void*)") long deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("int64_t") long storage_offset, + @ByRef @Cast("void(*)(void*)") long deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Cast("int64_t") long storage_offset, + @Const 
@ByRef PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Cast("int64_t") long storage_offset, + @Const @ByRef PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("int64_t") long storage_offset, + @ByRef @Cast("void(*)(void*)") Pointer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Cast("int64_t") long storage_offset, + @ByRef @Cast("void(*)(void*)") Pointer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Cast("int64_t") long storage_offset, + @ByRef @Cast("void(*)(void*)") long deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options, + @Const @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional target_device); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Cast("int64_t") long storage_offset, + @ByRef @Cast("void(*)(void*)") long deleter); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @Const @ByRef PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @Const @ByRef PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByRef @Cast("void(*)(void*)") Pointer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByRef @Cast("void(*)(void*)") Pointer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByRef @Cast("void(*)(void*)") long deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByRef @Cast("void(*)(void*)") long deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @Const @ByRef PointerConsumer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") 
TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @Const @ByRef PointerConsumer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByRef @Cast("void(*)(void*)") Pointer deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByRef @Cast("void(*)(void*)") Pointer deleter); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByRef @Cast("void(*)(void*)") long deleter, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByRef @Cast("void(*)(void*)") long deleter); + +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... strides); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + // namespace at -// Parsed from ATen/ops/fft_fftn.h +// Parsed from ATen/ops/from_file.h // #pragma once @@ -43527,26 +29265,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_fftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor from_file(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor from_file(@ByVal @Cast("c10::string_view*") Pointer filename); +// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor from_file(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal BoolOptional shared, @ByVal LongOptional size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_fftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
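The `from_blob` overloads above wrap a JavaCPP `Pointer` as a tensor without copying, with optional strides, storage offset, deleter, and `TensorOptions`; `noopDelete` is the no-op deleter for buffers the caller keeps ownership of. A minimal sketch, assuming the default `TensorOptions` select float and that the Java-side buffer outlives the tensor:

import org.bytedeco.javacpp.FloatPointer;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.from_blob;

public class FromBlobSketch {
    public static void main(String[] args) {
        // Six floats owned by Java; from_blob wraps them without copying.
        FloatPointer data = new FloatPointer(1f, 2f, 3f, 4f, 5f, 6f);
        Tensor t = from_blob(data, 2, 3); // the long... sizes overload above
        t.print(); // data must stay valid for as long as t is used
    }
}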
-@Namespace("at") public static native @ByRef Tensor fft_fftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_fftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor from_file_out(@ByRef Tensor out, @ByVal @Cast("c10::string_view*") Pointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size); +@Namespace("at") public static native @ByRef Tensor from_file_out(@ByRef Tensor out, @ByVal @Cast("c10::string_view*") Pointer filename); +// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor from_file_outf(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal BoolOptional shared, @ByVal LongOptional size, @ByRef Tensor out); -// Parsed from ATen/ops/fft_fftshift.h +// Parsed from ATen/ops/full.h // #pragma once @@ -43567,95 +29304,68 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_fftshift(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim); -@Namespace("at") public static native @ByVal Tensor fft_fftshift(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_fftshift(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); - - - - -// Parsed from ATen/ops/fft_hfft.h - -// #pragma once - -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); +// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// #include +// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); -// aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_hfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_hfft(@Const @ByRef Tensor self); +// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_hfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_hfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
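The `full` overloads above mirror `aten::full`: a size list plus a `Scalar` fill value, with dtype/layout/device either bundled in `TensorOptions` or passed individually, and `full_symint` for symbolic sizes. A short sketch with hypothetical values, assuming the generated `Scalar(double)` constructor and the presets' usual static import:

// assuming: import static org.bytedeco.pytorch.global.torch.*;
Tensor sevens = full(new long[] {2, 3}, new Scalar(7.0)); // 2x3, filled with 7.0
sevens.print();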
-@Namespace("at") public static native @ByRef Tensor fft_hfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor full_symint(@ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor full_symint(@ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value); +// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor full_symint(@ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// Parsed from ATen/ops/fft_hfft2.h -// #pragma once +// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal LongArrayRef size, @Const @ByRef Scalar fill_value); +@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); +// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor full_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value); -// #include +// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor full_symint_outf(@ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); -// aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_hfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); +// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
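As throughout these presets, each op surfaces in three forms: `full(...)` allocates its result, `full_out(out, ...)` puts `out` first and keeps trailing defaults optional, and `full_outf(..., out)` takes every argument explicitly with `out` last, matching the aten `.out` schema. A sketch of the out-variant under the same assumptions as above:

// assuming: import static org.bytedeco.pytorch.global.torch.*;
Tensor out = full(new long[] {2, 2}, new Scalar(0.0));
full_out(out, new long[] {2, 2}, new Scalar(1.0)); // refills out in place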
+@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByRef Tensor out); -// Parsed from ATen/ops/fft_hfftn.h +// Parsed from ATen/ops/full_like.h // #pragma once @@ -43676,26 +29386,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_hfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value); +// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_hfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor full_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor full_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar fill_value); +// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor full_like_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// Parsed from ATen/ops/fft_ifft.h +// Parsed from ATen/ops/fused_moving_avg_obs_fake_quant.h // #pragma once @@ -43716,23 +29425,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ifft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_ifft(@Const @ByRef Tensor self); -// aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor fft_ifft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_ifft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ifft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor fused_moving_avg_obs_fake_quant(@Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @ByRef Tensor running_min, @ByRef Tensor running_max, @ByRef Tensor scale, @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis, @Cast("bool") boolean per_row_fake_quant/*=false*/, @Cast("bool") boolean symmetric_quant/*=false*/); +@Namespace("at") public static native @ByVal Tensor fused_moving_avg_obs_fake_quant(@Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @ByRef Tensor running_min, @ByRef Tensor running_max, @ByRef Tensor scale, @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis); -// Parsed from ATen/ops/fft_ifft2.h +// Parsed from ATen/ops/gather.h // #pragma once @@ -43753,26 +29456,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ifft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor gather_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/); +@Namespace("at") public static native @ByRef Tensor gather_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index); +// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gather_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad, @ByRef Tensor out); -// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_ifft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_ifft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor gather(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/); +@Namespace("at") public static native @ByVal Tensor gather(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index); + +// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gather_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/); +@Namespace("at") public static native @ByRef Tensor gather_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index); +// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor gather_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad, @ByRef Tensor out); + +// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor gather(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/); +@Namespace("at") public static native @ByVal Tensor gather(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index); -// Parsed from ATen/ops/fft_ifftn.h +// Parsed from ATen/ops/gather_backward.h // #pragma once @@ -43793,26 +29503,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ifftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_ifftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor fft_ifftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_ifftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor +@Namespace("at") public static native @ByVal Tensor gather_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad); -// Parsed from ATen/ops/fft_ifftshift.h +// Parsed from ATen/ops/gcd.h // #pragma once @@ -43833,18 +29533,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ifftshift(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim); -@Namespace("at") public static native @ByVal Tensor fft_ifftshift(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ifftshift(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gcd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gcd_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::gcd(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor gcd(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gcd_(@ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/fft_ihfft.h + +// Parsed from ATen/ops/ge.h // #pragma once @@ -43865,23 +29571,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ihfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_ihfft(@Const @ByRef Tensor self); +// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ge_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ge_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor fft_ihfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_ihfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_ihfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor ge(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ge_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ge_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor ge(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/fft_ihfft2.h +// Parsed from ATen/ops/gelu.h // #pragma once @@ -43902,26 +29614,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ihfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gelu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); +@Namespace("at") public static native @ByRef Tensor gelu_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gelu_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer approximate, @ByRef Tensor out); -// aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gelu_(@ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); +@Namespace("at") public static native @ByRef Tensor gelu_(@ByRef Tensor self); + +// aten::gelu(Tensor self, *, str approximate='none') -> Tensor +@Namespace("at") public static native @ByVal Tensor gelu(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); +@Namespace("at") public static native @ByVal Tensor gelu(@Const @ByRef Tensor self); -// Parsed from ATen/ops/fft_ihfftn.h +// Parsed from ATen/ops/gelu_backward.h // #pragma once @@ -43942,26 +29655,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_ihfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gelu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); +@Namespace("at") public static native @ByRef Tensor gelu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gelu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer approximate, @ByRef Tensor grad_input); -// aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
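`gelu`, `gelu_`, and `gelu_backward` above all thread through the `approximate` string view, which defaults to "none"; the shorter overloads omit it. A sketch:

// assuming: import static org.bytedeco.pytorch.global.torch.*;
Tensor x = full(new long[] {3}, new Scalar(1.0));
Tensor y = gelu(x); // approximate defaults to "none"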
-@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor fft_ihfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @Const @ByRef Tensor out); +// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor +@Namespace("at") public static native @ByVal Tensor gelu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); +@Namespace("at") public static native @ByVal Tensor gelu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// Parsed from ATen/ops/fft_irfft.h +// Parsed from ATen/ops/geometric.h // #pragma once @@ -43982,23 +29692,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_irfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_irfft(@Const @ByRef Tensor self); +// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor geometric_out(@ByRef Tensor out, @Const @ByRef Tensor self, double p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor geometric_out(@ByRef Tensor out, @Const @ByRef Tensor self, double p); +// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor geometric_outf(@Const @ByRef Tensor self, double p, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_irfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_irfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_irfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::geometric(Tensor self, float p, *, Generator? 
generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor geometric(@Const @ByRef Tensor self, double p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor geometric(@Const @ByRef Tensor self, double p); -// Parsed from ATen/ops/fft_irfft2.h +// Parsed from ATen/ops/geqrf.h // #pragma once @@ -44019,26 +29729,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_irfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) +@Namespace("at") public static native @ByVal T_TensorTensor_T geqrf_out(@ByRef Tensor a, @ByRef Tensor tau, @Const @ByRef Tensor self); +// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) +@Namespace("at") public static native @ByVal T_TensorTensor_T geqrf_outf(@Const @ByRef Tensor self, @ByRef Tensor a, @ByRef Tensor tau); -// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_irfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_irfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau) +@Namespace("at") public static native @ByVal T_TensorTensor_T geqrf(@Const @ByRef Tensor self); -// Parsed from ATen/ops/fft_irfftn.h +// Parsed from ATen/ops/ger.h // #pragma once @@ -44059,26 +29764,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor fft_irfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +// aten::ger(Tensor self, Tensor vec2) -> Tensor +@Namespace("at") public static native @ByVal Tensor ger(@Const @ByRef Tensor self, @Const @ByRef Tensor vec2); -// aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor fft_irfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor fft_irfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor fft_irfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ger_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor vec2); +// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ger_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor vec2, @ByRef Tensor out); -// Parsed from ATen/ops/fft_rfft.h +// Parsed from ATen/ops/glu.h // #pragma once @@ -44099,23 +29799,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor fft_rfft(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByVal Tensor fft_rfft(@Const @ByRef Tensor self); +// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor glu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByRef Tensor glu_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor glu_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor out); -// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_rfft_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, @Cast("int64_t") long dim/*=-1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("at") public static native @ByRef Tensor fft_rfft_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fft_rfft_outf(@Const @ByRef Tensor self, @ByVal LongOptional n, @Cast("int64_t") long dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out); +// aten::glu(Tensor self, int dim=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor glu(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByVal Tensor glu(@Const @ByRef Tensor self); -// Parsed from ATen/ops/fft_rfft2.h +// Parsed from ATen/ops/glu_backward.h // #pragma once @@ -44136,26 +29836,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 

-// Parsed from ATen/ops/fft_rfft2.h
+// Parsed from ATen/ops/glu_backward.h

// #pragma once

@@ -44136,26 +29836,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
+// #include

-// aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);
-@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self);
-@Namespace("at") public static native @ByVal Tensor fft_rfft2(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);
+// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor glu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Cast("int64_t") long dim);
+// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor glu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor grad_input);
-// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);
-@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-@Namespace("at") public static native @ByRef Tensor fft_rfft2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);
-// aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor fft_rfft2_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out);
+// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
+@Namespace("at") public static native @ByVal Tensor glu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Cast("int64_t") long dim);

-// Parsed from ATen/ops/fft_rfftfreq.h
+// Parsed from ATen/ops/glu_backward_jvp.h

// #pragma once

@@ -44176,25 +29871,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
+// #include

-// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor fft_rfftfreq(@Cast("int64_t") long n, double d/*=1.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor fft_rfftfreq(@Cast("int64_t") long n);
-// aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor fft_rfftfreq(@Cast("int64_t") long n, double d, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
+@Namespace("at") public static native @ByVal Tensor glu_backward_jvp(@Const @ByRef Tensor grad_x, @Const @ByRef Tensor grad_glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dgrad_glu, @Const @ByRef Tensor dx, @Cast("int64_t") long dim);
-// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor fft_rfftfreq_out(@ByRef Tensor out, @Cast("int64_t") long n, double d/*=1.0*/);
-@Namespace("at") public static native @ByRef Tensor fft_rfftfreq_out(@ByRef Tensor out, @Cast("int64_t") long n);
-// aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor fft_rfftfreq_outf(@Cast("int64_t") long n, double d, @ByRef Tensor out);
+// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor glu_backward_jvp_out(@ByRef Tensor out, @Const @ByRef Tensor grad_x, @Const @ByRef Tensor grad_glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dgrad_glu, @Const @ByRef Tensor dx, @Cast("int64_t") long dim);
+// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor glu_backward_jvp_outf(@Const @ByRef Tensor grad_x, @Const @ByRef Tensor grad_glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dgrad_glu, @Const @ByRef Tensor dx, @Cast("int64_t") long dim, @ByRef Tensor out);

-// Parsed from ATen/ops/fft_rfftn.h
+// Parsed from ATen/ops/glu_jvp.h

// #pragma once

@@ -44215,26 +29906,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
+// #include

-// aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);
-@Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self);
-@Namespace("at") public static native @ByVal Tensor fft_rfftn(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);
+// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
+@Namespace("at") public static native @ByVal Tensor glu_jvp(@Const @ByRef Tensor glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dx, @Cast("int64_t") long dim);
-// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);
-@Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-@Namespace("at") public static native @ByRef Tensor fft_rfftn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);
-// aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor fft_rfftn_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional s, @ByVal LongArrayRefOptional dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor fft_rfftn_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal @Cast("c10::optional*") Pointer norm, @ByRef Tensor out);
+// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor glu_jvp_out(@ByRef Tensor out, @Const @ByRef Tensor glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dx, @Cast("int64_t") long dim); +// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor glu_jvp_outf(@Const @ByRef Tensor glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dx, @Cast("int64_t") long dim, @ByRef Tensor out); -// Parsed from ATen/ops/fill.h +// Parsed from ATen/ops/gradient.h // #pragma once @@ -44255,63 +29941,49 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::fill.Scalar(Tensor self, Scalar value) -> Tensor -@Namespace("at") public static native @ByVal @Name("fill") Tensor _fill(@Const @ByRef Tensor self, @Const @ByRef Scalar value); - -// aten::fill.Tensor(Tensor self, Tensor value) -> Tensor -@Namespace("at") public static native @ByVal @Name("fill") Tensor _fill(@Const @ByRef Tensor self, @Const @ByRef Tensor value); - -// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fill_(@ByRef Tensor self, @Const @ByRef Scalar value); - -// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fill_(@ByRef Tensor self, @Const @ByRef Tensor value); - -// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar value); -// aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fill_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar value, @ByRef Tensor out); - -// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor value); -// aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fill_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor value, @ByRef Tensor out); - - - - -// Parsed from ATen/ops/fill_diagonal.h +// #include -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? 
dim=None, int edge_order=1) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal LongArrayRef dim); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? 
dim=None, int edge_order=1) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing); -// #include +// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal LongArrayRef dim); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing); +// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing, @ByVal LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing, @ByVal LongArrayRef dim); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dim); -// Parsed from ATen/ops/fix.h +// Parsed from ATen/ops/greater.h // #pragma once @@ -44332,24 +30004,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fix(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor fix(@Const @ByRef Tensor self); +// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor greater_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor greater_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::fix_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fix_(@ByRef Tensor self); +// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor greater(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor greater_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor greater_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fix_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fix_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor greater(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/flatten.h +// Parsed from ATen/ops/greater_equal.h // #pragma once @@ -44370,26 +30047,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor flatten(@Const @ByRef Tensor self, @Cast("int64_t") long start_dim/*=0*/, @Cast("int64_t") long end_dim/*=-1*/); -@Namespace("at") public static native @ByVal Tensor flatten(@Const @ByRef Tensor self); +// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor greater_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
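The long... overloads above are what make gradient ergonomic from Java; a minimal sketch, assuming the TensorVector adapter exposes size()/get(long) as JavaCPP std::vector wrappers usually do:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class GradientExample {
        public static void main(String[] args) {
            Tensor t = randn(4, 5);
            // aten::gradient.array via the varargs overload: central
            // differences along dims 0 and 1, one result tensor per dim.
            TensorVector gs = gradient(t, 0, 1);
            for (long i = 0; i < gs.size(); i++) {
                gs.get(i).print(); // assumed Tensor.print() binding
            }
        }
    }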
+@Namespace("at") public static native @ByRef Tensor greater_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor flatten(@Const @ByRef Tensor self, @Cast("int64_t") long start_dim, @Cast("int64_t") long end_dim, @ByVal Dimname out_dim); +// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor greater_equal(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor flatten(@Const @ByRef Tensor self, @ByVal Dimname start_dim, @ByVal Dimname end_dim, @ByVal Dimname out_dim); +// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor greater_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor greater_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor flatten(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dims, @ByVal Dimname out_dim); +// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor greater_equal(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/flatten_dense_tensors.h +// Parsed from ATen/ops/grid_sampler.h // #pragma once @@ -44410,16 +30090,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor flatten_dense_tensors(@ByVal TensorArrayRef tensors); +// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor +@Namespace("at") public static native @ByVal Tensor grid_sampler(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); -// Parsed from ATen/ops/flip.h +// Parsed from ATen/ops/grid_sampler_2d.h // #pragma once @@ -44440,24 +30120,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::flip(Tensor self, int[] dims) -> Tensor -@Namespace("at") public static native @ByVal Tensor flip(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("at") public static native @ByVal Tensor flip(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor +@Namespace("at") public static native @ByVal Tensor grid_sampler_2d(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); -// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor flip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("at") public static native @ByRef Tensor flip_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor flip_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor flip_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); +// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor grid_sampler_2d_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); +// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor grid_sampler_2d_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByRef Tensor out); -// Parsed from ATen/ops/fliplr.h +// Parsed from ATen/ops/grid_sampler_2d_backward.h // #pragma once @@ -44478,16 +30155,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fliplr(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor fliplr(@Const @ByRef Tensor self); +// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T grid_sampler_2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T grid_sampler_2d_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 

-// Parsed from ATen/ops/fliplr.h
+// Parsed from ATen/ops/grid_sampler_2d_backward.h

// #pragma once

@@ -44478,16 +30155,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
+// #include

-// aten::fliplr(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor fliplr(@Const @ByRef Tensor self);
+// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensor_T grid_sampler_2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask);
+// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T grid_sampler_2d_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask);
+// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T grid_sampler_2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1);

-// Parsed from ATen/ops/flipud.h
+
+// Parsed from ATen/ops/grid_sampler_3d.h

// #pragma once

@@ -44508,16 +30190,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
+// #include

-// aten::flipud(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor flipud(@Const @ByRef Tensor self);
+// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
+@Namespace("at") public static native @ByVal Tensor grid_sampler_3d(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners);
+
+// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor grid_sampler_3d_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners);
+// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor grid_sampler_3d_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByRef Tensor out);

-// Parsed from ATen/ops/float_power.h
+// Parsed from ATen/ops/grid_sampler_3d_backward.h

// #pragma once

@@ -44538,37 +30225,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
-
-// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor float_power_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor exponent);
-// aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor float_power_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor exponent, @ByRef Tensor out);
-
-// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
-@Namespace("at") public static native @ByVal Tensor float_power(@Const @ByRef Tensor self, @Const @ByRef Tensor exponent);
-
-// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor float_power_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor exponent);
-// aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor float_power_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor exponent, @ByRef Tensor out);
+// #include
-// aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor
-@Namespace("at") public static native @ByVal Tensor float_power(@Const @ByRef Scalar self, @Const @ByRef Tensor exponent);
-// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor float_power_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar exponent);
-// aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor float_power_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar exponent, @ByRef Tensor out);
+// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensor_T grid_sampler_3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask);
-// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
-@Namespace("at") public static native @ByVal Tensor float_power(@Const @ByRef Tensor self, @Const @ByRef Scalar exponent);
+// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T grid_sampler_3d_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask);
+// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T grid_sampler_3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1);

-// Parsed from ATen/ops/floor.h
+// Parsed from ATen/ops/group_norm.h

// #pragma once

@@ -44589,24 +30260,17 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
-
-// aten::floor(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor floor(@Const @ByRef Tensor self);
+// #include
-// aten::floor_(Tensor(a!) self) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor floor_(@ByRef Tensor self);
-// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor floor_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor floor_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
+// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
+@Namespace("at") public static native @ByVal Tensor group_norm(@Const @ByRef Tensor input, @Cast("int64_t") long num_groups, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enabled/*=true*/);
+@Namespace("at") public static native @ByVal Tensor group_norm(@Const @ByRef Tensor input, @Cast("int64_t") long num_groups);
-@Namespace("at") public static native @ByRef Tensor fmax_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor gru_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_ih, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_hh); +@Namespace("at") public static native @ByVal Tensor gru_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh); -// Parsed from ATen/ops/fmin.h +// Parsed from ATen/ops/gt.h // #pragma once @@ -44700,21 +30355,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fmin(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor fmin(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gt_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gt_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fmin_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fmin_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor gt(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gt_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor gt_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor gt(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/fmod.h + +// Parsed from ATen/ops/hamming_window.h // #pragma once @@ -44735,29 +30398,57 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fmod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fmod_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length); +// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor fmod(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fmod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fmod_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha); +// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor fmod(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta); +// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hamming_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length); +// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hamming_window_outf(@Cast("int64_t") long window_length, @ByRef Tensor out); + +// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hamming_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hamming_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByRef Tensor out); + +// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hamming_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha); +// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hamming_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, @ByRef Tensor out); + +// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor hamming_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta); +// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hamming_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta, @ByRef Tensor out); -// Parsed from ATen/ops/frac.h +// Parsed from ATen/ops/hann_window.h // #pragma once @@ -44778,24 +30469,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::frac(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor frac(@Const @ByRef Tensor self); +// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length); +// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::frac_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor frac_(@ByRef Tensor self); +// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor frac_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor frac_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hann_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length); +// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hann_window_outf(@Cast("int64_t") long window_length, @ByRef Tensor out); + +// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor hann_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hann_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByRef Tensor out); -// Parsed from ATen/ops/fractional_max_pool2d.h +// Parsed from ATen/ops/hardshrink.h // #pragma once @@ -44816,24 +30518,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fractional_max_pool2d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor random_samples); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fractional_max_pool2d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); -// aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fractional_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fractional_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); +// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardshrink_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.5)") Scalar lambd); +@Namespace("at") public static native @ByRef Tensor hardshrink_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor hardshrink_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar lambd, @ByRef Tensor out); -// aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple fractional_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor random_samples); -@Namespace("at") public static native @ByVal TensorTensorTuple fractional_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); +// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor +@Namespace("at") public static native @ByVal Tensor hardshrink(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.5)") Scalar lambd); +@Namespace("at") public static native @ByVal Tensor hardshrink(@Const @ByRef Tensor self); -// Parsed from ATen/ops/fractional_max_pool2d_backward.h +// Parsed from ATen/ops/hardshrink_backward.h // #pragma once @@ -44854,24 +30555,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); -// aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor fractional_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor hardshrink_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd); +// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardshrink_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd, @ByRef Tensor grad_input); -// aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor -@Namespace("at") public static native @ByVal Tensor fractional_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByVal Tensor fractional_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); +// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor +@Namespace("at") public static native @ByVal Tensor hardshrink_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd); -// Parsed from ATen/ops/fractional_max_pool3d.h +// Parsed from ATen/ops/hardsigmoid.h // #pragma once @@ -44892,24 +30590,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fractional_max_pool3d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor random_samples); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fractional_max_pool3d_out(@ByRef Tensor output, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); -// aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fractional_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer fractional_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples, @ByRef Tensor output, @ByRef Tensor indices); +// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardsigmoid_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardsigmoid_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple fractional_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor random_samples); -@Namespace("at") public static native @ByVal TensorTensorTuple fractional_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor random_samples); +// aten::hardsigmoid(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor hardsigmoid(@Const @ByRef Tensor self); + +// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardsigmoid_(@ByRef Tensor self); -// Parsed from ATen/ops/fractional_max_pool3d_backward.h +// Parsed from ATen/ops/hardsigmoid_backward.h // #pragma once @@ -44930,24 +30628,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); -// aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
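
hardsigmoid shows the three generated forms side by side: functional, in-place (trailing underscore), and destination-passing. A hedged sketch under the same assumptions as above:

Tensor x = hann_window(8);
Tensor y = hardsigmoid(x);        // functional: allocates a new result tensor
hardsigmoid_(x);                  // in-place: mutates x and returns it by reference
Tensor out = hamming_window(8);   // any tensor of the right shape works as destination
hardsigmoid_out(out, y);          // writes the result into out
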
-@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor fractional_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardsigmoid_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardsigmoid_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor grad_input); -// aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor -@Namespace("at") public static native @ByVal Tensor fractional_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByVal Tensor fractional_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Const @ByRef Tensor indices); +// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor hardsigmoid_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// Parsed from ATen/ops/frexp.h +// Parsed from ATen/ops/hardswish.h // #pragma once @@ -44968,21 +30663,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent) -@Namespace("at") public static native @ByVal TensorTensorTuple frexp(@Const @ByRef Tensor self); +// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardswish_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardswish_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer frexp_out(@ByRef Tensor mantissa, @ByRef Tensor exponent, @Const @ByRef Tensor self); -// aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) 
exponent) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer frexp_outf(@Const @ByRef Tensor self, @ByRef Tensor mantissa, @ByRef Tensor exponent); +// aten::hardswish(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor hardswish(@Const @ByRef Tensor self); + +// aten::hardswish_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardswish_(@ByRef Tensor self); -// Parsed from ATen/ops/frobenius_norm.h +// Parsed from ATen/ops/hardswish_backward.h // #pragma once @@ -45003,28 +30701,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor frobenius_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor hardswish_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor frobenius_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor frobenius_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor frobenius_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardswish_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
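
The *_backward entry points are normally reached through autograd, but the bindings allow calling them directly, e.g. to test a custom gradient path. A hedged sketch with stand-in values (same assumptions as above):

Tensor self = hann_window(8);                       // the forward input
Tensor gradOut = hamming_window(8);                 // stand-in upstream gradient
Tensor gradIn = hardswish_backward(gradOut, self);  // gradient w.r.t. the input
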
+@Namespace("at") public static native @ByRef Tensor hardswish_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/from_file.h +// Parsed from ATen/ops/hardtanh.h // #pragma once @@ -45045,25 +30736,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor from_file(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor from_file(@ByVal @Cast("c10::string_view*") Pointer filename); -// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor from_file(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal BoolOptional shared, @ByVal LongOptional size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardtanh_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(-1)") Scalar min_val, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar max_val); +@Namespace("at") public static native @ByRef Tensor hardtanh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardtanh_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar min_val, @Const @ByRef Scalar max_val, @ByRef Tensor out); -// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor from_file_out(@ByRef Tensor out, @ByVal @Cast("c10::string_view*") Pointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size); -@Namespace("at") public static native @ByRef Tensor from_file_out(@ByRef Tensor out, @ByVal @Cast("c10::string_view*") Pointer filename); -// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor from_file_outf(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal BoolOptional shared, @ByVal LongOptional size, @ByRef Tensor out); +// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor hardtanh(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(-1)") Scalar min_val, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar max_val); +@Namespace("at") public static native @ByVal Tensor hardtanh(@Const @ByRef Tensor self); + +// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor hardtanh_(@ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(-1)") Scalar min_val, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar max_val); +@Namespace("at") public static native @ByRef Tensor hardtanh_(@ByRef Tensor self); -// Parsed from ATen/ops/full.h +// Parsed from ATen/ops/hardtanh_backward.h // #pragma once @@ -45084,68 +30777,56 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); -// aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - -// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); +// #include -// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardtanh_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar min_val, @Const @ByRef Scalar max_val); +// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hardtanh_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar min_val, @Const @ByRef Scalar max_val, @ByRef Tensor grad_input); +// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor +@Namespace("at") public static native @ByVal Tensor hardtanh_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar min_val, @Const @ByRef Scalar max_val); -// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor full_symint(@ByVal SymIntRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor full_symint(@ByVal SymIntRef size, @Const @ByRef Scalar fill_value); -// aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor full_symint(@ByVal SymIntRef size, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// Parsed from ATen/ops/heaviside.h -// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value); -@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); +// #pragma once +// @generated by torchgen/gen.py from Function.h -// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor full_symint_out(@ByRef Tensor out, @ByVal SymIntRef size, @Const @ByRef Scalar fill_value); +// #include -// aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor full_symint_outf(@ByVal SymIntRef size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); +// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor heaviside_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor values); +// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor heaviside_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor values, @ByRef Tensor out); -// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor full_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); -// aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor full_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByRef Tensor out); +// aten::heaviside(Tensor self, Tensor values) -> Tensor +@Namespace("at") public static native @ByVal Tensor heaviside(@Const @ByRef Tensor self, @Const @ByRef Tensor values); -// Parsed from ATen/ops/full_like.h +// Parsed from ATen/ops/hinge_embedding_loss.h // #pragma once @@ -45166,25 +30847,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value); -// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor full_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor full_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar fill_value); -// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor full_like_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor hinge_embedding_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, double margin/*=1.0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor hinge_embedding_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/fused_moving_avg_obs_fake_quant.h +// Parsed from ATen/ops/histc.h // #pragma once @@ -45205,17 +30878,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) 
zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor fused_moving_avg_obs_fake_quant(@Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @ByRef Tensor running_min, @ByRef Tensor running_max, @ByRef Tensor scale, @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis, @Cast("bool") boolean per_row_fake_quant/*=false*/, @Cast("bool") boolean symmetric_quant/*=false*/); -@Namespace("at") public static native @ByVal Tensor fused_moving_avg_obs_fake_quant(@Const @ByRef Tensor self, @Const @ByRef Tensor observer_on, @Const @ByRef Tensor fake_quant_on, @ByRef Tensor running_min, @ByRef Tensor running_max, @ByRef Tensor scale, @ByRef Tensor zero_point, double averaging_const, @Cast("int64_t") long quant_min, @Cast("int64_t") long quant_max, @Cast("int64_t") long ch_axis); +// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor histc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar min, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar max); +@Namespace("at") public static native @ByRef Tensor histc_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor histc_outf(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @Const @ByRef Scalar min, @Const @ByRef Scalar max, @ByRef Tensor out); + +// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor histc(@Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar min, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar max); +@Namespace("at") public static native @ByVal Tensor histc(@Const @ByRef Tensor self); -// Parsed from ATen/ops/gather.h +// Parsed from ATen/ops/histogram.h // #pragma once @@ -45236,33 +30915,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gather_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/); -@Namespace("at") public static native @ByRef Tensor gather_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index); -// aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gather_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad, @ByRef Tensor out); +// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) 
bin_edges) +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self, @Const @ByRef Tensor bins, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self, @Const @ByRef Tensor bins); +// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor bins, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByRef Tensor hist, @ByRef Tensor bin_edges); -// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor gather(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/); -@Namespace("at") public static native @ByVal Tensor gather(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index); +// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram(@Const @ByRef Tensor self, @Const @ByRef Tensor bins, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram(@Const @ByRef Tensor self, @Const @ByRef Tensor bins); -// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gather_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/); -@Namespace("at") public static native @ByRef Tensor gather_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index); -// aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gather_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad, @ByRef Tensor out); +// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self); +// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) 
hist, Tensor(b!) bin_edges) +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram_outf(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @ByVal DoubleArrayRefOptional range, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByRef Tensor hist, @ByRef Tensor bin_edges); -// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor gather(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad/*=false*/); -@Namespace("at") public static native @ByVal Tensor gather(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index); +// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram(@Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T histogram(@Const @ByRef Tensor self); -// Parsed from ATen/ops/gather_backward.h +// Parsed from ATen/ops/histogramdd.h // #pragma once @@ -45283,16 +30962,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal LongArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal LongArrayRef bins); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... bins); +// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @Cast("int64_t") long bins); -// aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor -@Namespace("at") public static native @ByVal Tensor gather_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Cast("bool") boolean sparse_grad); +// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorVector_T histogramdd(@Const @ByRef Tensor self, @ByVal @Cast("at::TensorList*") TensorArrayRef bins); -// Parsed from ATen/ops/gcd.h +// Parsed from ATen/ops/hsplit.h // #pragma once @@ -45313,24 +31003,20 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gcd_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gcd_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::gcd(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor gcd(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); -// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gcd_(@ByRef Tensor self, @Const @ByRef Tensor other); +// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); -// Parsed from ATen/ops/ge.h +// Parsed from ATen/ops/hspmm.h // #pragma once @@ -45351,29 +31037,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
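
histc returns a plain Tensor, while histogram returns one of the T_TensorTensor_T tuple wrappers introduced by this refactoring; the get0()/get1() accessors below are an assumption, matching the presets' existing tuple classes. Sketch under the same assumptions as above, plus org.bytedeco.pytorch.T_TensorTensor_T:

Tensor x = hann_window(64);
Tensor counts = histc(x);            // 100 bins over [min, max] by default
T_TensorTensor_T hb = histogram(x);  // bin_ct overload with the default 100 bins
Tensor hist = hb.get0();             // histogram counts (assumed accessor)
Tensor edges = hb.get1();            // bin edges (assumed accessor)
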
-@Namespace("at") public static native @ByRef Tensor ge_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ge_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// #include -// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor ge(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ge_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ge_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hspmm_out(@ByRef Tensor out, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); +// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hspmm_outf(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @ByRef Tensor out); -// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor ge(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor +@Namespace("at") public static native @ByVal Tensor hspmm(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// Parsed from ATen/ops/gelu.h +// Parsed from ATen/ops/hstack.h // #pragma once @@ -45394,27 +31072,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gelu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); -@Namespace("at") public static native @ByRef Tensor gelu_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gelu_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer approximate, @ByRef Tensor out); -// aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gelu_(@ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); -@Namespace("at") public static native @ByRef Tensor gelu_(@ByRef Tensor self); +// aten::hstack(Tensor[] tensors) -> Tensor +@Namespace("at") public static native @ByVal Tensor hstack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// aten::gelu(Tensor self, *, str approximate='none') -> Tensor -@Namespace("at") public static native @ByVal Tensor gelu(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); -@Namespace("at") public static native @ByVal Tensor gelu(@Const @ByRef Tensor self); +// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor hstack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hstack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); -// Parsed from ATen/ops/gelu_backward.h +// Parsed from ATen/ops/huber_loss.h // #pragma once @@ -45435,23 +31107,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gelu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); -@Namespace("at") public static native @ByRef Tensor gelu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gelu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer approximate, @ByRef Tensor grad_input); +// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor huber_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, double delta/*=1.0*/); +@Namespace("at") public static native @ByRef Tensor huber_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor huber_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double delta, @ByRef Tensor out); -// aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor -@Namespace("at") public static native @ByVal Tensor gelu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"none\")") @Cast("c10::string_view*") Pointer approximate); -@Namespace("at") public static native @ByVal Tensor gelu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor +@Namespace("at") public static native @ByVal Tensor huber_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, double delta/*=1.0*/); +@Namespace("at") public static native @ByVal Tensor huber_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/geometric.h +// Parsed from ATen/ops/huber_loss_backward.h // #pragma once @@ -45472,23 +31144,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor geometric_out(@ByRef Tensor out, @Const @ByRef Tensor self, double p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor geometric_out(@ByRef Tensor out, @Const @ByRef Tensor self, double p); -// aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor geometric_outf(@Const @ByRef Tensor self, double p, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor huber_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double delta); +// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor huber_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double delta, @ByRef Tensor grad_input); -// aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor geometric(@Const @ByRef Tensor self, double p, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor geometric(@Const @ByRef Tensor self, double p); +// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor +@Namespace("at") public static native @ByVal Tensor huber_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double delta); -// Parsed from ATen/ops/geqrf.h +// Parsed from ATen/ops/hypot.h // #pragma once @@ -45509,21 +31179,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer geqrf_out(@ByRef Tensor a, @ByRef Tensor tau, @Const @ByRef Tensor self); -// aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer geqrf_outf(@Const @ByRef Tensor self, @ByRef Tensor a, @ByRef Tensor tau); +// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor hypot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor hypot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau) -@Namespace("at") public static native @ByVal TensorTensorTuple geqrf(@Const @ByRef Tensor self); +// aten::hypot(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor hypot(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/ger.h +// Parsed from ATen/ops/i0.h // #pragma once @@ -45544,21 +31214,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::ger(Tensor self, Tensor vec2) -> Tensor -@Namespace("at") public static native @ByVal Tensor ger(@Const @ByRef Tensor self, @Const @ByRef Tensor vec2); +// aten::i0(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor i0(@Const @ByRef Tensor self); -// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ger_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor vec2); -// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ger_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor vec2, @ByRef Tensor out); +// aten::i0_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor i0_(@ByRef Tensor self); + +// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor i0_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor i0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/glu.h +// Parsed from ATen/ops/igamma.h // #pragma once @@ -45579,23 +31252,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor glu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/); -@Namespace("at") public static native @ByRef Tensor glu_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor glu_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor out); +// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor igamma_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor igamma_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::glu(Tensor self, int dim=-1) -> Tensor -@Namespace("at") public static native @ByVal Tensor glu(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/); -@Namespace("at") public static native @ByVal Tensor glu(@Const @ByRef Tensor self); +// aten::igamma(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor igamma(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/glu_backward.h +// Parsed from ATen/ops/igammac.h // #pragma once @@ -45616,21 +31287,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor glu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor glu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor grad_input); +// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor igammac_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor igammac_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor glu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::igammac(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor igammac(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/glu_backward_jvp.h +// Parsed from ATen/ops/im2col.h // #pragma once @@ -45651,21 +31322,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor glu_backward_jvp(@Const @ByRef Tensor grad_x, @Const @ByRef Tensor grad_glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dgrad_glu, @Const @ByRef Tensor dx, @Cast("int64_t") long dim); +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor im2col_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByRef Tensor im2col_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
stride); +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor im2col_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor im2col_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); -// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor glu_backward_jvp_out(@ByRef Tensor out, @Const @ByRef Tensor grad_x, @Const @ByRef Tensor grad_glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dgrad_glu, @Const @ByRef Tensor dx, @Cast("int64_t") long dim); -// aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor glu_backward_jvp_outf(@Const @ByRef Tensor grad_x, @Const @ByRef Tensor grad_glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dgrad_glu, @Const @ByRef Tensor dx, @Cast("int64_t") long dim, @ByRef Tensor out); +// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor +@Namespace("at") public static native @ByVal Tensor im2col(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef dilation, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByVal Tensor im2col(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -// Parsed from ATen/ops/glu_jvp.h +// Parsed from ATen/ops/imag.h // #pragma once @@ -45686,21 +31360,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor glu_jvp(@Const @ByRef Tensor glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dx, @Cast("int64_t") long dim); -// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor glu_jvp_out(@ByRef Tensor out, @Const @ByRef Tensor glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dx, @Cast("int64_t") long dim); -// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) 
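// Reviewer sketch for the im2col hunk above: each int[2] parameter generates two overload
// families, one taking LongArrayRef and one taking plain long[] / long... . The sketch uses the
// array overload shown verbatim in the hunk; factory and import assumptions as in the other
// sketches, class name hypothetical.
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class Im2colSketch {
    public static void main(String[] args) {
        Tensor img = rand(1, 3, 8, 8); // N, C, H, W
        Tensor cols = im2col(img,
                new long[]{3, 3},  // kernel_size
                new long[]{1, 1},  // dilation
                new long[]{1, 1},  // padding
                new long[]{1, 1}); // stride
        System.out.println(cols.size(1)); // 3*3*3 = 27 patch rows per spatial location
    }
}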
-@Namespace("at") public static native @ByRef Tensor glu_jvp_outf(@Const @ByRef Tensor glu, @Const @ByRef Tensor x, @Const @ByRef Tensor dx, @Cast("int64_t") long dim, @ByRef Tensor out); +// aten::imag(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor imag(@Const @ByRef Tensor self); -// Parsed from ATen/ops/gradient.h +// Parsed from ATen/ops/index.h // #pragma once @@ -45721,49 +31390,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self); - -// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @Const @ByRef Scalar spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); - -// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); - -// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? 
dim=None, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing); +// #include -// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal ScalarArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing); +// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor +@Namespace("at") public static native @ByVal Tensor index(@Const @ByRef Tensor self, @Const @ByRef TensorOptionalList indices); -// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("int64_t") long edge_order/*=1*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector gradient(@Const @ByRef Tensor self, @ByVal TensorArrayRef spacing, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor index_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef TensorOptionalList indices); +// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_outf(@Const @ByRef Tensor self, @Const @ByRef TensorOptionalList indices, @ByRef Tensor out); -// Parsed from ATen/ops/greater.h +// Parsed from ATen/ops/index_add.h // #pragma once @@ -45784,29 +31425,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor greater_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor greater_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor greater(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor index_add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); +// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_add_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor greater_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor greater_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_add(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor index_add(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); -// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor greater(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_add(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor index_add(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); -// Parsed from ATen/ops/greater_equal.h +// Parsed from ATen/ops/index_copy.h // #pragma once @@ -45827,29 +31466,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor greater_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor greater_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor greater_equal(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); +// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByRef Tensor out); -// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor greater_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor greater_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); -// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor greater_equal(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_copy(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); -// Parsed from ATen/ops/grid_sampler.h +// Parsed from ATen/ops/index_fill.h // #pragma once @@ -45870,16 +31504,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor -@Namespace("at") public static native @ByVal Tensor grid_sampler(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); +// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_fill(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); +// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_fill(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor value); +// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_fill(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); +// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_fill(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor value); -// Parsed from ATen/ops/grid_sampler_2d.h +// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); +// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_fill_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByRef Tensor out); + +// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor index_fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor value); +// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_fill_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor value, @ByRef Tensor out); + + + + +// Parsed from ATen/ops/index_put.h // #pragma once @@ -45900,21 +31553,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor -@Namespace("at") public static native @ByVal Tensor grid_sampler_2d(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); +// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_put_(@ByRef Tensor self, @Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values, @Cast("bool") boolean accumulate/*=false*/); +@Namespace("at") public static native @ByRef Tensor index_put_(@ByRef Tensor self, @Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values); -// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor grid_sampler_2d_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); -// aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor grid_sampler_2d_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByRef Tensor out); +// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_put(@Const @ByRef Tensor self, @Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values, @Cast("bool") boolean accumulate/*=false*/); +@Namespace("at") public static native @ByVal Tensor index_put(@Const @ByRef Tensor self, @Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values); +// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_put_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values, @Cast("bool") boolean accumulate/*=false*/); +@Namespace("at") public static native @ByRef Tensor index_put_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values); +// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor index_put_outf(@Const @ByRef Tensor self, @Const @ByRef TensorOptionalList indices, @Const @ByRef Tensor values, @Cast("bool") boolean accumulate, @ByRef Tensor out); -// Parsed from ATen/ops/grid_sampler_2d_backward.h + +// Parsed from ATen/ops/index_reduce.h // #pragma once @@ -45935,21 +31594,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple grid_sampler_2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByRef Tensor index_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce); +// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_reduce_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self, @ByRef Tensor out); -// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer grid_sampler_2d_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer grid_sampler_2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByVal Tensor index_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce); -// Parsed from ATen/ops/grid_sampler_3d.h +// Parsed from ATen/ops/index_select.h // #pragma once @@ -45970,21 +31631,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor -@Namespace("at") public static native @ByVal Tensor grid_sampler_3d(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); +// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_select_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index); +// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_select_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @ByRef Tensor out); -// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor grid_sampler_3d_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners); -// aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor grid_sampler_3d_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByRef Tensor out); +// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_select(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index); + +// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor index_select_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index); +// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor index_select_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @ByRef Tensor out); + +// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_select(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index); -// Parsed from ATen/ops/grid_sampler_3d_backward.h +// Parsed from ATen/ops/index_select_backward.h // #pragma once @@ -46005,21 +31674,22 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple grid_sampler_3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_select_backward(@Const @ByRef Tensor grad, @ByVal LongArrayRef self_sizes, @Cast("int64_t") long dim, @Const @ByRef Tensor index); +@Namespace("at") public static native @ByVal Tensor index_select_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] self_sizes, @Cast("int64_t") long dim, @Const @ByRef Tensor index); -// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer grid_sampler_3d_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer grid_sampler_3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor grid, @Cast("int64_t") long interpolation_mode, @Cast("int64_t") long padding_mode, @Cast("bool") boolean align_corners, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); + +// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor +@Namespace("at") public static native @ByVal Tensor index_select_backward_symint(@Const @ByRef Tensor grad, @ByVal SymIntArrayRef self_sizes, @Cast("int64_t") long dim, @Const @ByRef Tensor index); -// Parsed from ATen/ops/group_norm.h + +// Parsed from ATen/ops/indices.h // #pragma once @@ -46040,17 +31710,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor -@Namespace("at") public static native @ByVal Tensor group_norm(@Const @ByRef Tensor input, @Cast("int64_t") long num_groups, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enabled/*=true*/); -@Namespace("at") public static native @ByVal Tensor group_norm(@Const @ByRef Tensor input, @Cast("int64_t") long num_groups); -// Parsed from ATen/ops/gru.h +// Parsed from ATen/ops/indices_copy.h // #pragma once @@ -46071,19 +31738,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple gru(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +// aten::indices_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor indices_copy(@Const @ByRef Tensor self); -// aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple gru(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); +// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/gru_cell.h +// Parsed from ATen/ops/infinitely_differentiable_gelu_backward.h // #pragma once @@ -46104,17 +31773,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor gru_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_ih, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_hh); -@Namespace("at") public static native @ByVal Tensor gru_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh); +// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor infinitely_differentiable_gelu_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self); -// Parsed from ATen/ops/gt.h +// Parsed from ATen/ops/inner.h // #pragma once @@ -46135,29 +31803,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gt_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gt_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// #include -// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor gt(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gt_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor gt_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::inner(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor inner(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor gt(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor inner_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor inner_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/hamming_window.h +// Parsed from ATen/ops/instance_norm.h // #pragma once @@ -46178,57 +31838,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length); -// aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - -// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -// aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - -// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha); -// aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - -// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta); -// aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - -// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor hamming_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length); -// aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hamming_window_outf(@Cast("int64_t") long window_length, @ByRef Tensor out); - -// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hamming_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -// aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hamming_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByRef Tensor out); +// #include -// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hamming_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha); -// aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hamming_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, @ByRef Tensor out); -// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hamming_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta); -// aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hamming_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta, @ByRef Tensor out); +// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor +@Namespace("at") public static native @ByVal Tensor instance_norm(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean use_input_stats, double momentum, double eps, @Cast("bool") boolean cudnn_enabled); -// Parsed from ATen/ops/hann_window.h +// Parsed from ATen/ops/int_repr.h // #pragma once @@ -46249,35 +31868,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length); -// aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// #include -// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -// aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hann_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length); -// aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hann_window_outf(@Cast("int64_t") long window_length, @ByRef Tensor out); +// aten::int_repr(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor int_repr(@Const @ByRef Tensor self); -// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hann_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -// aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hann_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByRef Tensor out); +// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor int_repr_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor int_repr_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/hardshrink.h +// Parsed from ATen/ops/inverse.h // #pragma once @@ -46298,23 +31903,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardshrink_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.5)") Scalar lambd); -@Namespace("at") public static native @ByRef Tensor hardshrink_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor hardshrink_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar lambd, @ByRef Tensor out); +// aten::inverse(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor inverse(@Const @ByRef Tensor self); -// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor -@Namespace("at") public static native @ByVal Tensor hardshrink(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.5)") Scalar lambd); -@Namespace("at") public static native @ByVal Tensor hardshrink(@Const @ByRef Tensor self); +// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor inverse_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor inverse_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/hardshrink_backward.h +// Parsed from ATen/ops/is_coalesced.h // #pragma once @@ -46335,21 +31938,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardshrink_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd); -// aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardshrink_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd, @ByRef Tensor grad_input); -// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor -@Namespace("at") public static native @ByVal Tensor hardshrink_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd); -// Parsed from ATen/ops/hardsigmoid.h +// Parsed from ATen/ops/is_complex.h // #pragma once @@ -46370,24 +31966,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardsigmoid_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardsigmoid_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// #include -// aten::hardsigmoid(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor hardsigmoid(@Const @ByRef Tensor self); -// aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardsigmoid_(@ByRef Tensor self); +// aten::is_complex(Tensor self) -> bool +@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_complex(@Const @ByRef Tensor self); -// Parsed from ATen/ops/hardsigmoid_backward.h +// Parsed from ATen/ops/is_conj.h // #pragma once @@ -46408,21 +31996,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor hardsigmoid_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardsigmoid_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor grad_input); -// aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor hardsigmoid_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::is_conj(Tensor self) -> bool +@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_conj(@Const @ByRef Tensor self); -// Parsed from ATen/ops/hardswish.h +// Parsed from ATen/ops/is_distributed.h // #pragma once @@ -46443,24 +32026,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardswish_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardswish_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// #include -// aten::hardswish(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor hardswish(@Const @ByRef Tensor self); -// aten::hardswish_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardswish_(@ByRef Tensor self); +// aten::is_distributed(Tensor self) -> bool +@Namespace("at") public static native @Cast("bool") boolean is_distributed(@Const @ByRef Tensor self); -// Parsed from ATen/ops/hardswish_backward.h +// Parsed from ATen/ops/is_floating_point.h // #pragma once @@ -46481,21 +32056,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor hardswish_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardswish_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardswish_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor out); +// aten::is_floating_point(Tensor self) -> bool +@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_floating_point(@Const @ByRef Tensor self); -// Parsed from ATen/ops/hardtanh.h +// Parsed from ATen/ops/is_inference.h // #pragma once @@ -46516,27 +32086,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor hardtanh_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(-1)") Scalar min_val, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar max_val); -@Namespace("at") public static native @ByRef Tensor hardtanh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardtanh_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar min_val, @Const @ByRef Scalar max_val, @ByRef Tensor out); +// #include -// aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor hardtanh(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(-1)") Scalar min_val, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar max_val); -@Namespace("at") public static native @ByVal Tensor hardtanh(@Const @ByRef Tensor self); -// aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardtanh_(@ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(-1)") Scalar min_val, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar max_val); -@Namespace("at") public static native @ByRef Tensor hardtanh_(@ByRef Tensor self); +// aten::is_inference(Tensor self) -> bool +@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_inference(@Const @ByRef Tensor self); -// Parsed from ATen/ops/hardtanh_backward.h +// Parsed from ATen/ops/is_leaf.h // #pragma once @@ -46557,21 +32116,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardtanh_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar min_val, @Const @ByRef Scalar max_val); -// aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hardtanh_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar min_val, @Const @ByRef Scalar max_val, @ByRef Tensor grad_input); -// aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor -@Namespace("at") public static native @ByVal Tensor hardtanh_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar min_val, @Const @ByRef Scalar max_val); -// Parsed from ATen/ops/heaviside.h +// Parsed from ATen/ops/is_neg.h // #pragma once @@ -46592,21 +32144,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor heaviside_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor values); -// aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor heaviside_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor values, @ByRef Tensor out); -// aten::heaviside(Tensor self, Tensor values) -> Tensor -@Namespace("at") public static native @ByVal Tensor heaviside(@Const @ByRef Tensor self, @Const @ByRef Tensor values); +// aten::is_neg(Tensor self) -> bool +@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_neg(@Const @ByRef Tensor self); -// Parsed from ATen/ops/hinge_embedding_loss.h +// Parsed from ATen/ops/is_nonzero.h // #pragma once @@ -46627,17 +32174,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor hinge_embedding_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, double margin/*=1.0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor hinge_embedding_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::is_nonzero(Tensor self) -> bool +@Namespace("at") public static native @Cast("bool") boolean is_nonzero(@Const @ByRef Tensor self); -// Parsed from ATen/ops/histc.h +// Parsed from ATen/ops/is_pinned.h // #pragma once @@ -46658,23 +32204,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor histc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar min, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar max); -@Namespace("at") public static native @ByRef Tensor histc_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor histc_outf(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @Const @ByRef Scalar min, @Const @ByRef Scalar max, @ByRef Tensor out); -// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor histc(@Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar min, @Const @ByRef(nullValue = "at::Scalar(0)") Scalar max); -@Namespace("at") public static native @ByVal Tensor histc(@Const @ByRef Tensor self); -// Parsed from ATen/ops/histogram.h +// Parsed from ATen/ops/is_same_size.h // #pragma once @@ -46695,33 +32232,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self, @Const @ByRef Tensor bins, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self, @Const @ByRef Tensor bins); -// aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? 
weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer histogram_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor bins, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByRef Tensor hist, @ByRef Tensor bin_edges); - -// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) -@Namespace("at") public static native @ByVal TensorTensorTuple histogram(@Const @ByRef Tensor self, @Const @ByRef Tensor bins, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple histogram(@Const @ByRef Tensor self, @Const @ByRef Tensor bins); +// #include -// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer histogram_out(@ByRef Tensor hist, @ByRef Tensor bin_edges, @Const @ByRef Tensor self); -// aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer histogram_outf(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @ByVal DoubleArrayRefOptional range, @Const @ByRef TensorOptional weight, @Cast("bool") boolean density, @ByRef Tensor hist, @ByRef Tensor bin_edges); -// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) -@Namespace("at") public static native @ByVal TensorTensorTuple histogram(@Const @ByRef Tensor self, @Cast("int64_t") long bins/*=100*/, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple histogram(@Const @ByRef Tensor self); +// aten::is_same_size(Tensor self, Tensor other) -> bool +@Namespace("at") public static native @Cast("bool") boolean is_same_size(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/histogramdd.h +// Parsed from ATen/ops/is_set_to.h // #pragma once @@ -46742,27 +32262,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? 
weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) -@Namespace("at") public static native @ByVal TensorTensorVectorTuple histogramdd(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorVectorTuple histogramdd(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef bins); -@Namespace("at") public static native @ByVal TensorTensorVectorTuple histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorVectorTuple histogramdd(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... bins); +// #include -// aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) -@Namespace("at") public static native @ByVal TensorTensorVectorTuple histogramdd(@Const @ByRef Tensor self, @Cast("int64_t") long bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorVectorTuple histogramdd(@Const @ByRef Tensor self, @Cast("int64_t") long bins); -// aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) -@Namespace("at") public static native @ByVal TensorTensorVectorTuple histogramdd(@Const @ByRef Tensor self, @ByVal TensorArrayRef bins, @ByVal(nullValue = "c10::optional >(c10::nullopt)") DoubleArrayRefOptional range, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("bool") boolean density/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorVectorTuple histogramdd(@Const @ByRef Tensor self, @ByVal TensorArrayRef bins); -// Parsed from ATen/ops/hsplit.h +// Parsed from ATen/ops/is_signed.h // #pragma once @@ -46783,20 +32290,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); -// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector hsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
indices); +// aten::is_signed(Tensor self) -> bool +@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_signed(@Const @ByRef Tensor self); -// Parsed from ATen/ops/hspmm.h +// Parsed from ATen/ops/is_vulkan_available.h // #pragma once @@ -46817,21 +32320,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hspmm_out(@ByRef Tensor out, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hspmm_outf(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @ByRef Tensor out); -// aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor -@Namespace("at") public static native @ByVal Tensor hspmm(@Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); +// aten::is_vulkan_available() -> bool +@Namespace("at") public static native @Cast("bool") boolean is_vulkan_available(); -// Parsed from ATen/ops/hstack.h +// Parsed from ATen/ops/isclose.h // #pragma once @@ -46852,21 +32350,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::hstack(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor hstack(@ByVal TensorArrayRef tensors); -// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hstack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hstack_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); +// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor isclose(@Const @ByRef Tensor self, @Const @ByRef Tensor other, double rtol/*=1e-05*/, double atol/*=1e-08*/, @Cast("bool") boolean equal_nan/*=false*/); +@Namespace("at") public static native @ByVal Tensor isclose(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/huber_loss.h +// Parsed from ATen/ops/isfinite.h // #pragma once @@ -46887,23 +32381,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor huber_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, double delta/*=1.0*/); -@Namespace("at") public static native @ByRef Tensor huber_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor huber_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double delta, @ByRef Tensor out); -// aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor -@Namespace("at") public static native @ByVal Tensor huber_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, double delta/*=1.0*/); -@Namespace("at") public static native @ByVal Tensor huber_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::isfinite(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor isfinite(@Const @ByRef Tensor self); -// Parsed from ATen/ops/huber_loss_backward.h +// Parsed from ATen/ops/isin.h // #pragma once @@ -46924,21 +32411,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor huber_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double delta); -// aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor huber_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double delta, @ByRef Tensor grad_input); +// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Tensor elements, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); +@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Tensor elements, @Const @ByRef Tensor test_elements); +// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isin_outf(@Const @ByRef Tensor elements, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique, @Cast("bool") boolean invert, @ByRef Tensor out); -// aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor -@Namespace("at") public static native @ByVal Tensor huber_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double delta); +// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Tensor elements, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); +@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Tensor elements, @Const @ByRef Tensor test_elements); + +// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Tensor elements, @Const @ByRef Scalar test_element, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); +@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Tensor elements, @Const @ByRef Scalar test_element); +// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isin_outf(@Const @ByRef Tensor elements, @Const @ByRef Scalar test_element, @Cast("bool") boolean assume_unique, @Cast("bool") boolean invert, @ByRef Tensor out); + +// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Tensor elements, @Const @ByRef Scalar test_element, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); +@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Tensor elements, @Const @ByRef Scalar test_element); + +// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Scalar element, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); +@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Scalar element, @Const @ByRef Tensor test_elements); +// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isin_outf(@Const @ByRef Scalar element, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique, @Cast("bool") boolean invert, @ByRef Tensor out); + +// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Scalar element, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); +@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Scalar element, @Const @ByRef Tensor test_elements); -// Parsed from ATen/ops/hypot.h +// Parsed from ATen/ops/isinf.h // #pragma once @@ -46959,21 +32468,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hypot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor hypot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::isinf(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor isinf(@Const @ByRef Tensor self); -// aten::hypot(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor hypot(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor isinf_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isinf_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/i0.h +// Parsed from ATen/ops/isnan.h // #pragma once @@ -46994,24 +32503,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::i0(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor i0(@Const @ByRef Tensor self); -// aten::i0_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor i0_(@ByRef Tensor self); +// aten::isnan(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor isnan(@Const @ByRef Tensor self); -// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor i0_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor i0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isnan_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isnan_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/igamma.h +// Parsed from ATen/ops/isneginf.h // #pragma once @@ -47032,21 +32538,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor igamma_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor igamma_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::isneginf(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor isneginf(@Const @ByRef Tensor self); -// aten::igamma(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor igamma(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isneginf_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isneginf_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/igammac.h +// Parsed from ATen/ops/isposinf.h // #pragma once @@ -47067,21 +32573,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor igammac_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor igammac_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::isposinf(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor isposinf(@Const @ByRef Tensor self); -// aten::igammac(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor igammac(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isposinf_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor isposinf_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/im2col.h +// Parsed from ATen/ops/isreal.h // #pragma once @@ -47102,24 +32608,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor im2col_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor im2col_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor im2col_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor im2col_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); -// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor -@Namespace("at") public static native @ByVal Tensor im2col(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor im2col(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +// aten::isreal(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor isreal(@Const @ByRef Tensor self); -// Parsed from ATen/ops/imag.h +// Parsed from ATen/ops/istft.h // #pragma once @@ -47140,16 +32638,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::imag(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor imag(@Const @ByRef Tensor self); +// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor istft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional length, @Cast("bool") boolean return_complex/*=false*/); +@Namespace("at") public static native @ByVal Tensor istft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft); -// Parsed from ATen/ops/index.h +// Parsed from ATen/ops/item.h // #pragma once @@ -47170,18 +32669,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor -// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) -// aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!) 
-// Parsed from ATen/ops/index_add.h +// Parsed from ATen/ops/kaiser_window.h // #pragma once @@ -47202,27 +32697,46 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor index_add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); -// aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_add_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length); +// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_add(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor index_add(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); +// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_add(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor index_add(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); +// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta); +// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor kaiser_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length); +// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor kaiser_window_outf(@Cast("int64_t") long window_length, @ByRef Tensor out); + +// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor kaiser_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor kaiser_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByRef Tensor out); +// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor kaiser_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta); +// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor kaiser_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta, @ByRef Tensor out); -// Parsed from ATen/ops/index_copy.h + +// Parsed from ATen/ops/kl_div.h // #pragma once @@ -47243,24 +32757,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); -// aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByRef Tensor out); +// #include -// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); -// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_copy(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source); +// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor kl_div(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean log_target/*=false*/); +@Namespace("at") public static native @ByVal Tensor kl_div(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/index_fill.h +// Parsed from ATen/ops/kron.h // #pragma once @@ -47281,35 +32788,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_fill(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); - -// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_fill(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor value); - -// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_fill(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); +// #include -// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_fill(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor value); -// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor index_fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); -// aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_fill_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByRef Tensor out); +// aten::kron(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor kron(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor value); -// aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_fill_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor value, @ByRef Tensor out); +// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor kron_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor kron_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/index_put.h +// Parsed from ATen/ops/kthvalue.h // #pragma once @@ -47330,20 +32823,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) +// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T kthvalue(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T kthvalue(@Const @ByRef Tensor self, @Cast("int64_t") long k); -// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor +// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T kthvalue_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T kthvalue_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k); +// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T kthvalue_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); -// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) -// aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) +// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T kthvalue(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T kthvalue(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal Dimname dim); +// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T kthvalue_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T kthvalue_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal Dimname dim); +// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T kthvalue_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); -// Parsed from ATen/ops/index_reduce.h + +// Parsed from ATen/ops/l1_loss.h // #pragma once @@ -47364,23 +32870,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); -@Namespace("at") public static native @ByRef Tensor index_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce); -// aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor index_reduce_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self, @ByRef Tensor out); -// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); -@Namespace("at") public static native @ByVal Tensor index_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @ByVal @Cast("c10::string_view*") Pointer reduce); +// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor l1_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor l1_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/index_select.h +// Parsed from ATen/ops/layer_norm.h // #pragma once @@ -47401,29 +32901,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_select_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index); -// aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_select_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @ByRef Tensor out); +// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor +@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enable/*=true*/); +@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape); +@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enable/*=true*/); +@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... normalized_shape); -// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_select(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index); -// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor index_select_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index); -// aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor index_select_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @ByRef Tensor out); +// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor +@Namespace("at") public static native @ByVal Tensor layer_norm_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRef normalized_shape, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enable/*=true*/); +@Namespace("at") public static native @ByVal Tensor layer_norm_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRef normalized_shape); -// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_select(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index); -// Parsed from ATen/ops/index_select_backward.h +// Parsed from ATen/ops/lcm.h // #pragma once @@ -47444,22 +32940,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_select_backward(@Const @ByRef Tensor grad, @ByVal @Cast("c10::ArrayRef*") LongArrayRef self_sizes, @Cast("int64_t") long dim, @Const @ByRef Tensor index); -@Namespace("at") public static native @ByVal Tensor index_select_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] self_sizes, @Cast("int64_t") long dim, @Const @ByRef Tensor index); +// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lcm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lcm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor -@Namespace("at") public static native @ByVal Tensor index_select_backward_symint(@Const @ByRef Tensor grad, @ByVal SymIntRef self_sizes, @Cast("int64_t") long dim, @Const @ByRef Tensor index); +// aten::lcm(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor lcm(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lcm_(@ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/indices.h +// Parsed from ATen/ops/ldexp.h // #pragma once @@ -47480,14 +32978,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor ldexp(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor ldexp_(@ByRef Tensor self, @Const @ByRef Tensor other); +// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ldexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ldexp_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/indices_copy.h +// Parsed from ATen/ops/le.h // #pragma once @@ -47508,21 +33016,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor le_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor le_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::indices_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor indices_copy(@Const @ByRef Tensor self); +// aten::le.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor le(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor le_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor le_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::le.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor le(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/infinitely_differentiable_gelu_backward.h +// Parsed from ATen/ops/leaky_relu.h // #pragma once @@ -47543,16 +33059,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor infinitely_differentiable_gelu_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self); +// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor leaky_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.01)") Scalar negative_slope); +@Namespace("at") public static native @ByRef Tensor leaky_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor leaky_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar negative_slope, @ByRef Tensor out); +// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor +@Namespace("at") public static native @ByVal Tensor leaky_relu(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.01)") Scalar negative_slope); +@Namespace("at") public static native @ByVal Tensor leaky_relu(@Const @ByRef Tensor self); + +// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor leaky_relu_(@ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.01)") Scalar negative_slope); +@Namespace("at") public static native @ByRef Tensor leaky_relu_(@ByRef Tensor self); -// Parsed from ATen/ops/inner.h + +// Parsed from ATen/ops/leaky_relu_backward.h // #pragma once @@ -47573,21 +33100,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::inner(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor inner(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor leaky_relu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar negative_slope, @Cast("bool") boolean self_is_result); +// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor leaky_relu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar negative_slope, @Cast("bool") boolean self_is_result, @ByRef Tensor grad_input); -// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor inner_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor inner_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor +@Namespace("at") public static native @ByVal Tensor leaky_relu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar negative_slope, @Cast("bool") boolean self_is_result); -// Parsed from ATen/ops/instance_norm.h +// Parsed from ATen/ops/lerp.h // #pragma once @@ -47608,16 +33135,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor -@Namespace("at") public static native @ByVal Tensor instance_norm(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean use_input_stats, double momentum, double eps, @Cast("bool") boolean cudnn_enabled); +// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor lerp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Scalar weight); +// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lerp_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Scalar weight, @ByRef Tensor out); + +// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lerp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Tensor weight); +// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lerp_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Tensor weight, @ByRef Tensor out); + +// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor +@Namespace("at") public static native @ByVal Tensor lerp(@Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Scalar weight); + +// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor +@Namespace("at") public static native @ByVal Tensor lerp(@Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Tensor weight); -// Parsed from ATen/ops/int_repr.h +// Parsed from ATen/ops/less.h // #pragma once @@ -47638,21 +33178,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::int_repr(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor int_repr(@Const @ByRef Tensor self); +// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor less_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor less_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor int_repr_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor int_repr_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::less.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor less(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor less_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor less_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::less.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor less(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/inverse.h +// Parsed from ATen/ops/less_equal.h // #pragma once @@ -47673,21 +33221,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::inverse(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor inverse(@Const @ByRef Tensor self); +// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor less_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor less_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor inverse_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor inverse_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor less_equal(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor less_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor less_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor less_equal(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/is_coalesced.h +// Parsed from ATen/ops/lgamma.h // #pragma once @@ -47708,14 +33264,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lgamma_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lgamma_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::lgamma(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor lgamma(@Const @ByRef Tensor self); -// Parsed from ATen/ops/is_complex.h + +// Parsed from ATen/ops/lift.h // #pragma once @@ -47736,16 +33299,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::is_complex(Tensor self) -> bool -@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_complex(@Const @ByRef Tensor self); +// aten::lift(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor lift(@Const @ByRef Tensor self); +// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor lift_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lift_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/is_conj.h + +// Parsed from ATen/ops/lift_fresh.h // #pragma once @@ -47766,16 +33334,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::is_conj(Tensor self) -> bool -@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_conj(@Const @ByRef Tensor self); +// aten::lift_fresh(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor lift_fresh(@Const @ByRef Tensor self); -// Parsed from ATen/ops/is_distributed.h +// Parsed from ATen/ops/lift_fresh_copy.h // #pragma once @@ -47796,16 +33364,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::is_distributed(Tensor self) -> bool -@Namespace("at") public static native @Cast("bool") boolean is_distributed(@Const @ByRef Tensor self); +// aten::lift_fresh_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor lift_fresh_copy(@Const @ByRef Tensor self); + +// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lift_fresh_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lift_fresh_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/is_floating_point.h +// Parsed from ATen/ops/linalg_cholesky.h // #pragma once @@ -47826,16 +33399,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::is_floating_point(Tensor self) -> bool -@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_floating_point(@Const @ByRef Tensor self); +// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_cholesky(@Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); +@Namespace("at") public static native @ByVal Tensor linalg_cholesky(@Const @ByRef Tensor self); + +// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_cholesky_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); +@Namespace("at") public static native @ByRef Tensor linalg_cholesky_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor linalg_cholesky_outf(@Const @ByRef Tensor self, @Cast("bool") boolean upper, @ByRef Tensor out); -// Parsed from ATen/ops/is_inference.h +// Parsed from ATen/ops/linalg_cholesky_ex.h // #pragma once @@ -47856,16 +33436,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::is_inference(Tensor self) -> bool -@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_inference(@Const @ByRef Tensor self); +// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_cholesky_ex(@Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/, @Cast("bool") boolean check_errors/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_cholesky_ex(@Const @ByRef Tensor self); +// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_cholesky_ex_out(@ByRef Tensor L, @ByRef Tensor info, @Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/, @Cast("bool") boolean check_errors/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_cholesky_ex_out(@ByRef Tensor L, @ByRef Tensor info, @Const @ByRef Tensor self); +// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_cholesky_ex_outf(@Const @ByRef Tensor self, @Cast("bool") boolean upper, @Cast("bool") boolean check_errors, @ByRef Tensor L, @ByRef Tensor info); -// Parsed from ATen/ops/is_leaf.h + +// Parsed from ATen/ops/linalg_cond.h // #pragma once @@ -47886,14 +33473,31 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_cond(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional p); +@Namespace("at") public static native @ByVal Tensor linalg_cond(@Const @ByRef Tensor self); + +// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_cond_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional p); +@Namespace("at") public static native @ByRef Tensor linalg_cond_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_cond_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByRef Tensor out); +// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_cond(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer p); +// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_cond_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer p); +// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor linalg_cond_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer p, @ByRef Tensor out); -// Parsed from ATen/ops/is_neg.h +// Parsed from ATen/ops/linalg_cross.h // #pragma once @@ -47914,16 +33518,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::is_neg(Tensor self) -> bool -@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_neg(@Const @ByRef Tensor self); +// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_cross(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByVal Tensor linalg_cross(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_cross_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByRef Tensor linalg_cross_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_cross_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Cast("int64_t") long dim, @ByRef Tensor out); -// Parsed from ATen/ops/is_nonzero.h +// Parsed from ATen/ops/linalg_det.h // #pragma once @@ -47944,16 +33555,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::is_nonzero(Tensor self) -> bool -@Namespace("at") public static native @Cast("bool") boolean is_nonzero(@Const @ByRef Tensor self); +// aten::linalg_det(Tensor A) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_det(@Const @ByRef Tensor A); + +// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_det_out(@ByRef Tensor out, @Const @ByRef Tensor A); +// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_det_outf(@Const @ByRef Tensor A, @ByRef Tensor out); -// Parsed from ATen/ops/is_pinned.h +// Parsed from ATen/ops/linalg_diagonal.h // #pragma once @@ -47974,14 +33590,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor linalg_diagonal(@Const @ByRef Tensor A, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=-2*/, @Cast("int64_t") long dim2/*=-1*/); +@Namespace("at") public static native @ByVal Tensor linalg_diagonal(@Const @ByRef Tensor A); -// Parsed from ATen/ops/is_same_size.h +// Parsed from ATen/ops/linalg_eig.h // #pragma once @@ -48002,16 +33621,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::is_same_size(Tensor self, Tensor other) -> bool -@Namespace("at") public static native @Cast("bool") boolean is_same_size(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eig(@Const @ByRef Tensor self); + +// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) 
eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eig_out(@ByRef Tensor eigenvalues, @ByRef Tensor eigenvectors, @Const @ByRef Tensor self); +// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eig_outf(@Const @ByRef Tensor self, @ByRef Tensor eigenvalues, @ByRef Tensor eigenvectors); -// Parsed from ATen/ops/is_set_to.h +// Parsed from ATen/ops/linalg_eigh.h // #pragma once @@ -48032,14 +33656,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh(@Const @ByRef Tensor self); +// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh_out(@ByRef Tensor eigvals, @ByRef Tensor eigvecs, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh_out(@ByRef Tensor eigvals, @ByRef Tensor eigvecs, @Const @ByRef Tensor self); +// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_eigh_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer UPLO, @ByRef Tensor eigvals, @ByRef Tensor eigvecs); -// Parsed from ATen/ops/is_signed.h +// Parsed from ATen/ops/linalg_eigvals.h // #pragma once @@ -48060,16 +33693,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::is_signed(Tensor self) -> bool -@Namespace("at") public static native @Cast("bool") boolean __dispatch_is_signed(@Const @ByRef Tensor self); +// aten::linalg_eigvals(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_eigvals(@Const @ByRef Tensor self); + +// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_eigvals_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
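[Illustrative unpacking of the two-tensor results above; T_TensorTensor_T's get0()/get1() accessors are assumed, as in the kthvalue sketch earlier, along with matmul and Tensor.t().]

import org.bytedeco.pytorch.T_TensorTensor_T;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class EighSketch {
    public static void main(String[] args) {
        Tensor a = randn(3, 3);
        Tensor sym = matmul(a, a.t());          // symmetric input for eigh
        T_TensorTensor_T r = linalg_eigh(sym);  // UPLO defaults to "L"
        Tensor eigenvalues = r.get0();          // assumed tuple accessors
        Tensor eigenvectors = r.get1();
    }
}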
+@Namespace("at") public static native @ByRef Tensor linalg_eigvals_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/isclose.h +// Parsed from ATen/ops/linalg_eigvalsh.h // #pragma once @@ -48090,17 +33728,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor isclose(@Const @ByRef Tensor self, @Const @ByRef Tensor other, double rtol/*=1e-05*/, double atol/*=1e-08*/, @Cast("bool") boolean equal_nan/*=false*/); -@Namespace("at") public static native @ByVal Tensor isclose(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_eigvalsh(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); +@Namespace("at") public static native @ByVal Tensor linalg_eigvalsh(@Const @ByRef Tensor self); + +// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); +@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer UPLO, @ByRef Tensor out); -// Parsed from ATen/ops/isfinite.h +// Parsed from ATen/ops/linalg_householder_product.h // #pragma once @@ -48121,16 +33765,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::isfinite(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor isfinite(@Const @ByRef Tensor self); +// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_householder_product(@Const @ByRef Tensor input, @Const @ByRef Tensor tau); + +// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_householder_product_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor tau); +// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_householder_product_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor tau, @ByRef Tensor out); -// Parsed from ATen/ops/isin.h +// Parsed from ATen/ops/linalg_inv.h // #pragma once @@ -48151,43 +33800,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Tensor elements, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); -@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Tensor elements, @Const @ByRef Tensor test_elements); -// aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isin_outf(@Const @ByRef Tensor elements, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique, @Cast("bool") boolean invert, @ByRef Tensor out); - -// aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Tensor elements, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); -@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Tensor elements, @Const @ByRef Tensor test_elements); - -// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Tensor elements, @Const @ByRef Scalar test_element, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); -@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Tensor elements, @Const @ByRef Scalar test_element); -// aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isin_outf(@Const @ByRef Tensor elements, @Const @ByRef Scalar test_element, @Cast("bool") boolean assume_unique, @Cast("bool") boolean invert, @ByRef Tensor out); +// #include -// aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Tensor elements, @Const @ByRef Scalar test_element, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); -@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Tensor elements, @Const @ByRef Scalar test_element); -// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Scalar element, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); -@Namespace("at") public static native @ByRef Tensor isin_out(@ByRef Tensor out, @Const @ByRef Scalar element, @Const @ByRef Tensor test_elements); -// aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor isin_outf(@Const @ByRef Scalar element, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique, @Cast("bool") boolean invert, @ByRef Tensor out); +// aten::linalg_inv(Tensor A) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_inv(@Const @ByRef Tensor A); -// aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Scalar element, @Const @ByRef Tensor test_elements, @Cast("bool") boolean assume_unique/*=false*/, @Cast("bool") boolean invert/*=false*/); -@Namespace("at") public static native @ByVal Tensor isin(@Const @ByRef Scalar element, @Const @ByRef Tensor test_elements); +// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_inv_out(@ByRef Tensor out, @Const @ByRef Tensor A); +// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_inv_outf(@Const @ByRef Tensor A, @ByRef Tensor out); -// Parsed from ATen/ops/isinf.h +// Parsed from ATen/ops/linalg_inv_ex.h // #pragma once @@ -48208,21 +33835,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::isinf(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor isinf(@Const @ByRef Tensor self); +// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_inv_ex(@Const @ByRef Tensor A, @Cast("bool") boolean check_errors/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_inv_ex(@Const @ByRef Tensor A); -// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isinf_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isinf_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_inv_ex_out(@ByRef Tensor inverse, @ByRef Tensor info, @Const @ByRef Tensor A, @Cast("bool") boolean check_errors/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_inv_ex_out(@ByRef Tensor inverse, @ByRef Tensor info, @Const @ByRef Tensor A); +// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) 
info) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_inv_ex_outf(@Const @ByRef Tensor A, @Cast("bool") boolean check_errors, @ByRef Tensor inverse, @ByRef Tensor info); -// Parsed from ATen/ops/isnan.h +// Parsed from ATen/ops/linalg_ldl_factor.h // #pragma once @@ -48243,21 +33872,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::isnan(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor isnan(@Const @ByRef Tensor self); +// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_ldl_factor(@Const @ByRef Tensor self, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_ldl_factor(@Const @ByRef Tensor self); -// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isnan_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isnan_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_ldl_factor_out(@ByRef Tensor LD, @ByRef Tensor pivots, @Const @ByRef Tensor self, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_ldl_factor_out(@ByRef Tensor LD, @ByRef Tensor pivots, @Const @ByRef Tensor self); +// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_ldl_factor_outf(@Const @ByRef Tensor self, @Cast("bool") boolean hermitian, @ByRef Tensor LD, @ByRef Tensor pivots); -// Parsed from ATen/ops/isneginf.h +// Parsed from ATen/ops/linalg_ldl_factor_ex.h // #pragma once @@ -48278,21 +33909,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::isneginf(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor isneginf(@Const @ByRef Tensor self); +// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_ldl_factor_ex(@Const @ByRef Tensor self, @Cast("bool") boolean hermitian/*=false*/, @Cast("bool") boolean check_errors/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_ldl_factor_ex(@Const @ByRef Tensor self); -// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isneginf_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isneginf_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) 
info) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_ldl_factor_ex_out(@ByRef Tensor LD, @ByRef Tensor pivots, @ByRef Tensor info, @Const @ByRef Tensor self, @Cast("bool") boolean hermitian/*=false*/, @Cast("bool") boolean check_errors/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_ldl_factor_ex_out(@ByRef Tensor LD, @ByRef Tensor pivots, @ByRef Tensor info, @Const @ByRef Tensor self); +// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_ldl_factor_ex_outf(@Const @ByRef Tensor self, @Cast("bool") boolean hermitian, @Cast("bool") boolean check_errors, @ByRef Tensor LD, @ByRef Tensor pivots, @ByRef Tensor info); -// Parsed from ATen/ops/isposinf.h +// Parsed from ATen/ops/linalg_ldl_solve.h // #pragma once @@ -48313,21 +33946,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::isposinf(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor isposinf(@Const @ByRef Tensor self); +// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_ldl_solve(@Const @ByRef Tensor LD, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByVal Tensor linalg_ldl_solve(@Const @ByRef Tensor LD, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B); -// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isposinf_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor isposinf_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_ldl_solve_out(@ByRef Tensor out, @Const @ByRef Tensor LD, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByRef Tensor linalg_ldl_solve_out(@ByRef Tensor out, @Const @ByRef Tensor LD, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B); +// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_ldl_solve_outf(@Const @ByRef Tensor LD, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean hermitian, @ByRef Tensor out); -// Parsed from ATen/ops/isreal.h +// Parsed from ATen/ops/linalg_lstsq.h // #pragma once @@ -48348,16 +33983,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::isreal(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor isreal(@Const @ByRef Tensor self); +// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? 
driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq(@Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional rcond, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq(@Const @ByRef Tensor self, @Const @ByRef Tensor b); +// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq_out(@ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values, @Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional rcond, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq_out(@ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values, @Const @ByRef Tensor self, @Const @ByRef Tensor b); +// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T linalg_lstsq_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal DoubleOptional rcond, @ByVal @Cast("c10::optional*") Pointer driver, @ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values); -// Parsed from ATen/ops/istft.h + +// Parsed from ATen/ops/linalg_lu.h // #pragma once @@ -48378,17 +34020,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? 
length=None, bool return_complex=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor istft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional length, @Cast("bool") boolean return_complex/*=false*/); -@Namespace("at") public static native @ByVal Tensor istft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft); +// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_lu(@Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_lu(@Const @ByRef Tensor A); +// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_lu_out(@ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U, @Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_lu_out(@ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U, @Const @ByRef Tensor A); +// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_lu_outf(@Const @ByRef Tensor A, @Cast("bool") boolean pivot, @ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U); -// Parsed from ATen/ops/item.h + +// Parsed from ATen/ops/linalg_lu_factor.h // #pragma once @@ -48409,14 +34057,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_lu_factor(@Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_lu_factor(@Const @ByRef Tensor A); +// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_lu_factor_out(@ByRef Tensor LU, @ByRef Tensor pivots, @Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_lu_factor_out(@ByRef Tensor LU, @ByRef Tensor pivots, @Const @ByRef Tensor A); +// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_lu_factor_outf(@Const @ByRef Tensor A, @Cast("bool") boolean pivot, @ByRef Tensor LU, @ByRef Tensor pivots); -// Parsed from ATen/ops/kaiser_window.h +// Parsed from ATen/ops/linalg_lu_factor_ex.h // #pragma once @@ -48437,46 +34094,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length); -// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - -// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - -// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta); -// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// #include -// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor kaiser_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length); -// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor kaiser_window_outf(@Cast("int64_t") long window_length, @ByRef Tensor out); -// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor kaiser_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor kaiser_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByRef Tensor out); +// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_lu_factor_ex(@Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/, @Cast("bool") boolean check_errors/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_lu_factor_ex(@Const @ByRef Tensor A); -// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor kaiser_window_out(@ByRef Tensor out, @Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta); -// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor kaiser_window_outf(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta, @ByRef Tensor out); +// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_lu_factor_ex_out(@ByRef Tensor LU, @ByRef Tensor pivots, @ByRef Tensor info, @Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/, @Cast("bool") boolean check_errors/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_lu_factor_ex_out(@ByRef Tensor LU, @ByRef Tensor pivots, @ByRef Tensor info, @Const @ByRef Tensor A); +// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_lu_factor_ex_outf(@Const @ByRef Tensor A, @Cast("bool") boolean pivot, @Cast("bool") boolean check_errors, @ByRef Tensor LU, @ByRef Tensor pivots, @ByRef Tensor info); -// Parsed from ATen/ops/kl_div.h +// Parsed from ATen/ops/linalg_lu_solve.h // #pragma once @@ -48497,17 +34131,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor kl_div(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("bool") boolean log_target/*=false*/); -@Namespace("at") public static native @ByVal Tensor kl_div(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_lu_solve(@Const @ByRef Tensor LU, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean adjoint/*=false*/); +@Namespace("at") public static native @ByVal Tensor linalg_lu_solve(@Const @ByRef Tensor LU, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B); + +// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor linalg_lu_solve_out(@ByRef Tensor out, @Const @ByRef Tensor LU, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean adjoint/*=false*/); +@Namespace("at") public static native @ByRef Tensor linalg_lu_solve_out(@ByRef Tensor out, @Const @ByRef Tensor LU, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B); +// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_lu_solve_outf(@Const @ByRef Tensor LU, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean left, @Cast("bool") boolean adjoint, @ByRef Tensor out); -// Parsed from ATen/ops/kron.h +// Parsed from ATen/ops/linalg_matmul.h // #pragma once @@ -48528,21 +34168,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::kron(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor kron(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_matmul(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor kron_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor kron_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_matmul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_matmul_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/kthvalue.h +// Parsed from ATen/ops/linalg_matrix_exp.h // #pragma once @@ -48563,33 +34203,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple kthvalue(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple kthvalue(@Const @ByRef Tensor self, @Cast("int64_t") long k); +// #include -// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer kthvalue_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer kthvalue_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k); -// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) 
-// aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer kthvalue_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices);

-// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
-@Namespace("at") public static native @ByVal TensorTensorTuple kthvalue(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/);
-@Namespace("at") public static native @ByVal TensorTensorTuple kthvalue(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal Dimname dim);
+// aten::linalg_matrix_exp(Tensor self) -> Tensor
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_exp(@Const @ByRef Tensor self);

-// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer kthvalue_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer kthvalue_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal Dimname dim);
-// aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer kthvalue_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices);
+// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_exp_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_exp_outf(@Const @ByRef Tensor self, @ByRef Tensor out);

-// Parsed from ATen/ops/l1_loss.h
+// Parsed from ATen/ops/linalg_matrix_norm.h

 // #pragma once
@@ -48610,17 +34238,39 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/l1_loss_ops.h>
+// #include <ATen/ops/linalg_matrix_norm_ops.h>

-// aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
-@Namespace("at") public static native @ByVal Tensor l1_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/);
-@Namespace("at") public static native @ByVal Tensor l1_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target);
+// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @Const @ByRef Scalar ord);
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+
+// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar ord);
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);
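// Usage sketch for the linalg_matrix_norm overloads above, assuming torch.rand(long...),
// the Scalar(double) constructor, and the ScalarTypeOptional default constructor
// (empty = dtype None) from these presets:
//
//   Tensor m = torch.rand(3, 3);
//   Tensor one = torch.linalg_matrix_norm(m, new Scalar(1.0));     // ord=1: max absolute column sum; dim defaults to {-2,-1}
//   Tensor two = torch.linalg_matrix_norm(m, new Scalar(2.0),
//       new long[]{-2, -1}, false, new ScalarTypeOptional());      // ord=2: largest singular value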
+// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self);
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+
+// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);

-// Parsed from ATen/ops/layer_norm.h
+// Parsed from ATen/ops/linalg_matrix_power.h

 // #pragma once
@@ -48641,25 +34291,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/layer_norm_ops.h>
-
-
-// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
-@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef normalized_shape, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enable/*=true*/);
-@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef normalized_shape);
-@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] normalized_shape, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enable/*=true*/);
-@Namespace("at") public static native @ByVal Tensor layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... normalized_shape);
+// #include <ATen/ops/linalg_matrix_power_ops.h>

-// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
-@Namespace("at") public static native @ByVal Tensor layer_norm_symint(@Const @ByRef Tensor input, @ByVal SymIntRef normalized_shape, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional weight, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, double eps/*=1e-05*/, @Cast("bool") boolean cudnn_enable/*=true*/);
-@Namespace("at") public static native @ByVal Tensor layer_norm_symint(@Const @ByRef Tensor input, @ByVal SymIntRef normalized_shape);
+// aten::linalg_matrix_power(Tensor self, int n) -> Tensor
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_power(@Const @ByRef Tensor self, @Cast("int64_t") long n);

+// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_power_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long n);
+// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_power_outf(@Const @ByRef Tensor self, @Cast("int64_t") long n, @ByRef Tensor out);

-// Parsed from ATen/ops/lcm.h
+// Parsed from ATen/ops/linalg_matrix_rank.h

 // #pragma once
@@ -48680,24 +34326,53 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/lcm_ops.h>
+// #include <ATen/ops/linalg_matrix_rank_ops.h>

-// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor lcm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other);
-// aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor lcm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out);
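// Usage sketch for the linalg_matrix_power binding above; n may be negative, in which
// case the input is inverted first. torch.eye(long) and torch.rand(long...) are assumed
// from these presets:
//
//   Tensor m = torch.eye(3).add(torch.rand(3, 3));   // reasonably well-conditioned input
//   Tensor cube = torch.linalg_matrix_power(m, 3);   // m matmul m matmul m
//   Tensor inv2 = torch.linalg_matrix_power(m, -2);  // square of the inverse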
+// aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional atol, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional rtol, @Cast("bool") boolean hermitian/*=false*/);
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor input);

-// aten::lcm(Tensor self, Tensor other) -> Tensor
-@Namespace("at") public static native @ByVal Tensor lcm(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
+// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional atol, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional rtol, @Cast("bool") boolean hermitian/*=false*/);
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor input);
+// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional atol, @Const @ByRef TensorOptional rtol, @Cast("bool") boolean hermitian, @ByRef Tensor out);

-// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor lcm_(@ByRef Tensor self, @Const @ByRef Tensor other);
+// aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian/*=false*/);
+@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol);
+
+// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian/*=false*/);
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol);
+// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_outf(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian, @ByRef Tensor out); +// aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor self, double tol, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor self, double tol); +// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor self, double tol, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor self, double tol); +// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_outf(@Const @ByRef Tensor self, double tol, @Cast("bool") boolean hermitian, @ByRef Tensor out); +// aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor input, @Const @ByRef Tensor tol, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor input, @Const @ByRef Tensor tol); -// Parsed from ATen/ops/ldexp.h +// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor tol, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor tol); +// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor tol, @Cast("bool") boolean hermitian, @ByRef Tensor out); + + + + +// Parsed from ATen/ops/linalg_multi_dot.h // #pragma once @@ -48718,24 +34393,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor ldexp(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ldexp_(@ByRef Tensor self, @Const @ByRef Tensor other); +// aten::linalg_multi_dot(Tensor[] tensors) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_multi_dot(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ldexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor ldexp_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_multi_dot_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_multi_dot_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); -// Parsed from ATen/ops/le.h +// Parsed from ATen/ops/linalg_norm.h // #pragma once @@ -48756,29 +34428,39 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor le_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor le_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::le.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor le(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord); +@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor le_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor le_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional ord, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// aten::le.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor le(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord); +@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
+// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);

-// Parsed from ATen/ops/leaky_relu.h
+// Parsed from ATen/ops/linalg_pinv.h

 // #pragma once
@@ -48799,27 +34481,53 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/leaky_relu_ops.h>
+// #include <ATen/ops/linalg_pinv_ops.h>

-// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor leaky_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.01)") Scalar negative_slope);
-@Namespace("at") public static native @ByRef Tensor leaky_relu_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor leaky_relu_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar negative_slope, @ByRef Tensor out);
+// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional atol, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional rtol, @Cast("bool") boolean hermitian/*=false*/);
+@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self);

-// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
-@Namespace("at") public static native @ByVal Tensor leaky_relu(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.01)") Scalar negative_slope);
-@Namespace("at") public static native @ByVal Tensor leaky_relu(@Const @ByRef Tensor self);
+// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional atol, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional rtol, @Cast("bool") boolean hermitian/*=false*/);
+@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_pinv_outf(@Const @ByRef Tensor self, @Const @ByRef TensorOptional atol, @Const @ByRef TensorOptional rtol, @Cast("bool") boolean hermitian, @ByRef Tensor out);
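// Usage sketch for the linalg_norm overloads above: with ord unset it computes the
// Frobenius norm for matrices and the 2-norm for vectors; the ScalarOptional and
// string_view "ord" overloads select other norms. torch.rand(long...) is assumed:
//
//   Tensor m = torch.rand(3, 4);
//   Tensor fro = torch.linalg_norm(m);   // default ord, over all dimensions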
-@Namespace("at") public static native @ByRef Tensor leaky_relu_(@ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.01)") Scalar negative_slope); -@Namespace("at") public static native @ByRef Tensor leaky_relu_(@ByRef Tensor self); +// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol); + +// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol); +// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_pinv_outf(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian, @ByRef Tensor out); + +// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, double rcond, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, double rcond); +// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, @Const @ByRef Tensor rcond, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, @Const @ByRef Tensor rcond); + +// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, double rcond, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, double rcond); +// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_pinv_outf(@Const @ByRef Tensor self, double rcond, @Cast("bool") boolean hermitian, @ByRef Tensor out); + +// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor rcond, @Cast("bool") boolean hermitian/*=false*/); +@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor rcond); +// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) 
+// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_pinv_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor rcond, @Cast("bool") boolean hermitian, @ByRef Tensor out);

-// Parsed from ATen/ops/leaky_relu_backward.h
+
+// Parsed from ATen/ops/linalg_qr.h

 // #pragma once
@@ -48840,21 +34548,23 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/leaky_relu_backward_ops.h>
+// #include <ATen/ops/linalg_qr_ops.h>

-// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor leaky_relu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar negative_slope, @Cast("bool") boolean self_is_result);
-// aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor leaky_relu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar negative_slope, @Cast("bool") boolean self_is_result, @ByRef Tensor grad_input);
+// aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
+@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr(@Const @ByRef Tensor A, @ByVal(nullValue = "c10::string_view(\"reduced\")") @Cast("c10::string_view*") Pointer mode);
+@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr(@Const @ByRef Tensor A);

-// aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor
-@Namespace("at") public static native @ByVal Tensor leaky_relu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar negative_slope, @Cast("bool") boolean self_is_result);
+// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
+@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor A, @ByVal(nullValue = "c10::string_view(\"reduced\")") @Cast("c10::string_view*") Pointer mode);
+@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor A);
+// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
+@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_qr_outf(@Const @ByRef Tensor A, @ByVal @Cast("c10::string_view*") Pointer mode, @ByRef Tensor Q, @ByRef Tensor R);

-// Parsed from ATen/ops/lerp.h
+// Parsed from ATen/ops/linalg_slogdet.h

 // #pragma once
@@ -48875,29 +34585,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/lerp_ops.h>
-
-
-// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor lerp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Scalar weight);
-// aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor lerp_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Scalar weight, @ByRef Tensor out);
+// #include <ATen/ops/linalg_slogdet_ops.h>
-@Namespace("at") public static native @ByRef Tensor lerp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Tensor weight); -// aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor lerp_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Tensor weight, @ByRef Tensor out); -// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor -@Namespace("at") public static native @ByVal Tensor lerp(@Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Scalar weight); +// aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_slogdet(@Const @ByRef Tensor A); -// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor -@Namespace("at") public static native @ByVal Tensor lerp(@Const @ByRef Tensor self, @Const @ByRef Tensor end, @Const @ByRef Tensor weight); +// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_slogdet_out(@ByRef Tensor sign, @ByRef Tensor logabsdet, @Const @ByRef Tensor A); +// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_slogdet_outf(@Const @ByRef Tensor A, @ByRef Tensor sign, @ByRef Tensor logabsdet); -// Parsed from ATen/ops/less.h +// Parsed from ATen/ops/linalg_solve.h // #pragma once @@ -48918,29 +34620,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor less_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor less_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// #include -// aten::less.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor less(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor less_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor less_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_solve(@Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/); +@Namespace("at") public static native @ByVal Tensor linalg_solve(@Const @ByRef Tensor A, @Const @ByRef Tensor B); -// aten::less.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor less(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor linalg_solve_out(@ByRef Tensor out, @Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/); +@Namespace("at") public static native @ByRef Tensor linalg_solve_out(@ByRef Tensor out, @Const @ByRef Tensor A, @Const @ByRef Tensor B); +// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_solve_outf(@Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left, @ByRef Tensor out); -// Parsed from ATen/ops/less_equal.h +// Parsed from ATen/ops/linalg_solve_ex.h // #pragma once @@ -48961,29 +34657,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor less_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor less_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// #include -// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor less_equal(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor less_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor less_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_solve_ex(@Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean check_errors/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_solve_ex(@Const @ByRef Tensor A, @Const @ByRef Tensor B); -// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor less_equal(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info) +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_solve_ex_out(@ByRef Tensor result, @ByRef Tensor info, @Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean check_errors/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_solve_ex_out(@ByRef Tensor result, @ByRef Tensor info, @Const @ByRef Tensor A, @Const @ByRef Tensor B); +// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) 
+// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
+@Namespace("at") public static native @ByVal T_TensorTensor_T linalg_solve_ex_outf(@Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left, @Cast("bool") boolean check_errors, @ByRef Tensor result, @ByRef Tensor info);

-// Parsed from ATen/ops/lgamma.h
+// Parsed from ATen/ops/linalg_solve_triangular.h

 // #pragma once
@@ -49004,21 +34694,23 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/lgamma_ops.h>
+// #include <ATen/ops/linalg_solve_triangular_ops.h>

-// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor lgamma_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor lgamma_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
+// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_solve_triangular_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor B, @Cast("bool") boolean upper, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean unitriangular/*=false*/);
+@Namespace("at") public static native @ByRef Tensor linalg_solve_triangular_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor B, @Cast("bool") boolean upper);
+// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor linalg_solve_triangular_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor B, @Cast("bool") boolean upper, @Cast("bool") boolean left, @Cast("bool") boolean unitriangular, @ByRef Tensor out);

-// aten::lgamma(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor lgamma(@Const @ByRef Tensor self);
+// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor linalg_solve_triangular(@Const @ByRef Tensor self, @Const @ByRef Tensor B, @Cast("bool") boolean upper, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean unitriangular/*=false*/);
+@Namespace("at") public static native @ByVal Tensor linalg_solve_triangular(@Const @ByRef Tensor self, @Const @ByRef Tensor B, @Cast("bool") boolean upper);

-// Parsed from ATen/ops/lift.h
+// Parsed from ATen/ops/linalg_svd.h

 // #pragma once
@@ -49039,21 +34731,23 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/lift_ops.h>
+// #include <ATen/ops/linalg_svd_ops.h>

-// aten::lift(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor lift(@Const @ByRef Tensor self);
+// aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd(@Const @ByRef Tensor A, @Cast("bool") boolean full_matrices/*=true*/, @ByVal(nullValue = "c10::optional<c10::string_view>(c10::nullopt)") @Cast("c10::optional<c10::string_view>*") Pointer driver);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd(@Const @ByRef Tensor A);

-// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor lift_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor lift_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh, @Const @ByRef Tensor A, @Cast("bool") boolean full_matrices/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh, @Const @ByRef Tensor A); +// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linalg_svd_outf(@Const @ByRef Tensor A, @Cast("bool") boolean full_matrices, @ByVal @Cast("c10::optional*") Pointer driver, @ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh); -// Parsed from ATen/ops/lift_fresh.h +// Parsed from ATen/ops/linalg_svdvals.h // #pragma once @@ -49074,16 +34768,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::lift_fresh(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor lift_fresh(@Const @ByRef Tensor self); +// aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_svdvals(@Const @ByRef Tensor A, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByVal Tensor linalg_svdvals(@Const @ByRef Tensor A); + +// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_svdvals_out(@ByRef Tensor out, @Const @ByRef Tensor A, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); +@Namespace("at") public static native @ByRef Tensor linalg_svdvals_out(@ByRef Tensor out, @Const @ByRef Tensor A); +// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_svdvals_outf(@Const @ByRef Tensor A, @ByVal @Cast("c10::optional*") Pointer driver, @ByRef Tensor out); -// Parsed from ATen/ops/lift_fresh_copy.h +// Parsed from ATen/ops/linalg_tensorinv.h // #pragma once @@ -49104,21 +34805,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::lift_fresh_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor lift_fresh_copy(@Const @ByRef Tensor self); +// aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_tensorinv(@Const @ByRef Tensor self, @Cast("int64_t") long ind/*=2*/); +@Namespace("at") public static native @ByVal Tensor linalg_tensorinv(@Const @ByRef Tensor self); -// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor lift_fresh_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor lift_fresh_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_tensorinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long ind/*=2*/); +@Namespace("at") public static native @ByRef Tensor linalg_tensorinv_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_tensorinv_outf(@Const @ByRef Tensor self, @Cast("int64_t") long ind, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_cholesky.h +// Parsed from ATen/ops/linalg_tensorsolve.h // #pragma once @@ -49139,23 +34842,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_cholesky(@Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); -@Namespace("at") public static native @ByVal Tensor linalg_cholesky(@Const @ByRef Tensor self); +// aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_tensorsolve(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dims); +@Namespace("at") public static native @ByVal Tensor linalg_tensorsolve(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +@Namespace("at") public static native @ByVal Tensor linalg_tensorsolve(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_cholesky_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/); -@Namespace("at") public static native @ByRef Tensor linalg_cholesky_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_cholesky_outf(@Const @ByRef Tensor self, @Cast("bool") boolean upper, @ByRef Tensor out); +// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_tensorsolve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dims); +@Namespace("at") public static native @ByRef Tensor linalg_tensorsolve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +@Namespace("at") public static native @ByRef Tensor linalg_tensorsolve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor linalg_tensorsolve_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal LongArrayRefOptional dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_tensorsolve_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_cholesky_ex.h +// Parsed from ATen/ops/linalg_vander.h // #pragma once @@ -49176,23 +34882,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info) -@Namespace("at") public static native @ByVal TensorTensorTuple linalg_cholesky_ex(@Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/, @Cast("bool") boolean check_errors/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple linalg_cholesky_ex(@Const @ByRef Tensor self); -// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_cholesky_ex_out(@ByRef Tensor L, @ByRef Tensor info, @Const @ByRef Tensor self, @Cast("bool") boolean upper/*=false*/, @Cast("bool") boolean check_errors/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_cholesky_ex_out(@ByRef Tensor L, @ByRef Tensor info, @Const @ByRef Tensor self); -// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_cholesky_ex_outf(@Const @ByRef Tensor self, @Cast("bool") boolean upper, @Cast("bool") boolean check_errors, @ByRef Tensor L, @ByRef Tensor info); +// aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_vander(@Const @ByRef Tensor x, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional N); +@Namespace("at") public static native @ByVal Tensor linalg_vander(@Const @ByRef Tensor x); -// Parsed from ATen/ops/linalg_cond.h +// Parsed from ATen/ops/linalg_vecdot.h // #pragma once @@ -49213,31 +34913,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_cond(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional p); -@Namespace("at") public static native @ByVal Tensor linalg_cond(@Const @ByRef Tensor self); +// #include -// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_cond_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional p); -@Namespace("at") public static native @ByRef Tensor linalg_cond_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_cond_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByRef Tensor out); -// aten::linalg_cond.p_str(Tensor self, str p) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_cond(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer p); +// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_vecdot(@Const @ByRef Tensor x, @Const @ByRef Tensor y, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByVal Tensor linalg_vecdot(@Const @ByRef Tensor x, @Const @ByRef Tensor y); -// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_cond_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer p); -// aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_cond_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer p, @ByRef Tensor out); +// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_vecdot_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor y, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByRef Tensor linalg_vecdot_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor y); +// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_vecdot_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor y, @Cast("int64_t") long dim, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_cross.h +// Parsed from ATen/ops/linalg_vector_norm.h // #pragma once @@ -49258,23 +34950,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_cross(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Cast("int64_t") long dim/*=-1*/); -@Namespace("at") public static native @ByVal Tensor linalg_cross(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linalg_vector_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor linalg_vector_norm(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor linalg_vector_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_cross_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Cast("int64_t") long dim/*=-1*/); -@Namespace("at") public static native @ByRef Tensor linalg_cross_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_cross_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Cast("int64_t") long dim, @ByRef Tensor out); +// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_vector_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor linalg_vector_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor linalg_vector_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linalg_vector_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor linalg_vector_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_det.h +// Parsed from ATen/ops/linear.h // #pragma once @@ -49295,21 +34990,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_det(Tensor A) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_det(@Const @ByRef Tensor A); +// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linear(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias); +@Namespace("at") public static native @ByVal Tensor linear(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_det_out(@ByRef Tensor out, @Const @ByRef Tensor A); -// aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_det_outf(@Const @ByRef Tensor A, @ByRef Tensor out); +// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor linear_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias); +@Namespace("at") public static native @ByRef Tensor linear_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight); +// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linear_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_diagonal.h +// Parsed from ATen/ops/linear_backward.h // #pragma once @@ -49330,17 +35027,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor linalg_diagonal(@Const @ByRef Tensor A, @Cast("int64_t") long offset/*=0*/, @Cast("int64_t") long dim1/*=-2*/, @Cast("int64_t") long dim2/*=-1*/); -@Namespace("at") public static native @ByVal Tensor linalg_diagonal(@Const @ByRef Tensor A); +// aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linear_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linear_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T linear_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/linalg_eig.h + +// Parsed from ATen/ops/linspace.h // #pragma once @@ -49361,21 +35062,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors) -@Namespace("at") public static native @ByVal TensorTensorTuple linalg_eig(@Const @ByRef Tensor self); +// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_eig_out(@ByRef Tensor eigenvalues, @ByRef Tensor eigenvectors, @Const @ByRef Tensor self); -// aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_eig_outf(@Const @ByRef Tensor self, @ByRef Tensor eigenvalues, @ByRef Tensor eigenvectors); +// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor linspace_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_eigh.h +// Parsed from ATen/ops/log.h // #pragma once @@ -49396,23 +35100,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors) -@Namespace("at") public static native @ByVal TensorTensorTuple linalg_eigh(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); -@Namespace("at") public static native @ByVal TensorTensorTuple linalg_eigh(@Const @ByRef Tensor self); +// aten::log(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor log(@Const @ByRef Tensor self); -// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_eigh_out(@ByRef Tensor eigvals, @ByRef Tensor eigvecs, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_eigh_out(@ByRef Tensor eigvals, @ByRef Tensor eigvecs, @Const @ByRef Tensor self); -// aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_eigh_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer UPLO, @ByRef Tensor eigvals, @ByRef Tensor eigvecs); +// aten::log_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log_(@ByRef Tensor self); + +// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor log_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_eigvals.h +// Parsed from ATen/ops/log10.h // #pragma once @@ -49433,21 +35138,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_eigvals(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_eigvals(@Const @ByRef Tensor self); +// aten::log10(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor log10(@Const @ByRef Tensor self); -// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_eigvals_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_eigvals_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::log10_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log10_(@ByRef Tensor self); + +// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log10_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log10_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_eigvalsh.h +// Parsed from ATen/ops/log1p.h // #pragma once @@ -49468,23 +35176,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_eigvalsh(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); -@Namespace("at") public static native @ByVal Tensor linalg_eigvalsh(@Const @ByRef Tensor self); +// aten::log1p(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor log1p(@Const @ByRef Tensor self); -// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"L\")") @Cast("c10::string_view*") Pointer UPLO); -@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_eigvalsh_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer UPLO, @ByRef Tensor out); +// aten::log1p_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log1p_(@ByRef Tensor self); + +// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log1p_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor log1p_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_householder_product.h +// Parsed from ATen/ops/log2.h // #pragma once @@ -49505,21 +35214,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_householder_product(@Const @ByRef Tensor input, @Const @ByRef Tensor tau); +// aten::log2(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor log2(@Const @ByRef Tensor self); -// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_householder_product_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor tau); -// aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_householder_product_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor tau, @ByRef Tensor out); +// aten::log2_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log2_(@ByRef Tensor self); + +// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log2_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log2_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_inv.h +// Parsed from ATen/ops/log_normal.h // #pragma once @@ -49540,21 +35252,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_inv(Tensor A) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_inv(@Const @ByRef Tensor A); +// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log_normal_out(@ByRef Tensor out, @Const @ByRef Tensor self, double mean/*=1*/, double std/*=2*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor log_normal_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor log_normal_outf(@Const @ByRef Tensor self, double mean, double std, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_inv_out(@ByRef Tensor out, @Const @ByRef Tensor A); -// aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_inv_outf(@Const @ByRef Tensor A, @ByRef Tensor out); +// aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? 
+@Namespace("at") public static native @ByVal Tensor log_normal(@Const @ByRef Tensor self, double mean/*=1*/, double std/*=2*/, @ByVal(nullValue = "c10::optional<at::Generator>(c10::nullopt)") GeneratorOptional generator);
+@Namespace("at") public static native @ByVal Tensor log_normal(@Const @ByRef Tensor self);
-// Parsed from ATen/ops/linalg_inv_ex.h
+// Parsed from ATen/ops/log_sigmoid.h
 // #pragma once
@@ -49575,23 +35289,21 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_inv_ex.h>
+// #include <ATen/ops/log_sigmoid.h>
-// aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
-@Namespace("at") public static native @ByVal TensorTensorTuple linalg_inv_ex(@Const @ByRef Tensor A, @Cast("bool") boolean check_errors/*=false*/);
-@Namespace("at") public static native @ByVal TensorTensorTuple linalg_inv_ex(@Const @ByRef Tensor A);
+// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor log_sigmoid_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor log_sigmoid_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
-// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer linalg_inv_ex_out(@ByRef Tensor inverse, @ByRef Tensor info, @Const @ByRef Tensor A, @Cast("bool") boolean check_errors/*=false*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer linalg_inv_ex_out(@ByRef Tensor inverse, @ByRef Tensor info, @Const @ByRef Tensor A);
-// aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer linalg_inv_ex_outf(@Const @ByRef Tensor A, @Cast("bool") boolean check_errors, @ByRef Tensor inverse, @ByRef Tensor info);
+// aten::log_sigmoid(Tensor self) -> Tensor
+@Namespace("at") public static native @ByVal Tensor log_sigmoid(@Const @ByRef Tensor self);
-// Parsed from ATen/ops/linalg_ldl_factor.h
+// Parsed from ATen/ops/log_sigmoid_backward.h
 // #pragma once
@@ -49612,23 +35324,21 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_ldl_factor.h>
+// #include <ATen/ops/log_sigmoid_backward.h>
-// aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
-@Namespace("at") public static native @ByVal TensorTensorTuple linalg_ldl_factor(@Const @ByRef Tensor self, @Cast("bool") boolean hermitian/*=false*/);
-@Namespace("at") public static native @ByVal TensorTensorTuple linalg_ldl_factor(@Const @ByRef Tensor self);
+// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor log_sigmoid_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor buffer);
+// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor log_sigmoid_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor buffer, @ByRef Tensor grad_input);
-// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer linalg_ldl_factor_out(@ByRef Tensor LD, @ByRef Tensor pivots, @Const @ByRef Tensor self, @Cast("bool") boolean hermitian/*=false*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer linalg_ldl_factor_out(@ByRef Tensor LD, @ByRef Tensor pivots, @Const @ByRef Tensor self);
-// aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer linalg_ldl_factor_outf(@Const @ByRef Tensor self, @Cast("bool") boolean hermitian, @ByRef Tensor LD, @ByRef Tensor pivots);
+// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
+@Namespace("at") public static native @ByVal Tensor log_sigmoid_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor buffer);
-// Parsed from ATen/ops/linalg_ldl_factor_ex.h
+// Parsed from ATen/ops/log_sigmoid_forward.h
 // #pragma once
@@ -49649,23 +35359,21 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_ldl_factor_ex.h>
+// #include <ATen/ops/log_sigmoid_forward.h>
-// aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple linalg_ldl_factor_ex(@Const @ByRef Tensor self, @Cast("bool") boolean hermitian/*=false*/, @Cast("bool") boolean check_errors/*=false*/);
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple linalg_ldl_factor_ex(@Const @ByRef Tensor self);
+// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T log_sigmoid_forward_out(@ByRef Tensor output, @ByRef Tensor buffer, @Const @ByRef Tensor self);
+// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T log_sigmoid_forward_outf(@Const @ByRef Tensor self, @ByRef Tensor output, @ByRef Tensor buffer);
-// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_ldl_factor_ex_out(@ByRef Tensor LD, @ByRef Tensor pivots, @ByRef Tensor info, @Const @ByRef Tensor self, @Cast("bool") boolean hermitian/*=false*/, @Cast("bool") boolean check_errors/*=false*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_ldl_factor_ex_out(@ByRef Tensor LD, @ByRef Tensor pivots, @ByRef Tensor info, @Const @ByRef Tensor self);
-// aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_ldl_factor_ex_outf(@Const @ByRef Tensor self, @Cast("bool") boolean hermitian, @Cast("bool") boolean check_errors, @ByRef Tensor LD, @ByRef Tensor pivots, @ByRef Tensor info);
+// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)
+@Namespace("at") public static native @ByVal T_TensorTensor_T log_sigmoid_forward(@Const @ByRef Tensor self);
-// Parsed from ATen/ops/linalg_ldl_solve.h
+// Parsed from ATen/ops/log_softmax.h
 // #pragma once
@@ -49686,23 +35394,27 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_ldl_solve.h>
+// #include <ATen/ops/log_softmax.h>
-// aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
-@Namespace("at") public static native @ByVal Tensor linalg_ldl_solve(@Const @ByRef Tensor LD, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean hermitian/*=false*/);
-@Namespace("at") public static native @ByVal Tensor linalg_ldl_solve(@Const @ByRef Tensor LD, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B);
+// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+@Namespace("at") public static native @ByVal Tensor log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim);
-// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor linalg_ldl_solve_out(@ByRef Tensor out, @Const @ByRef Tensor LD, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean hermitian/*=false*/);
-@Namespace("at") public static native @ByRef Tensor linalg_ldl_solve_out(@ByRef Tensor out, @Const @ByRef Tensor LD, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B);
-// aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor linalg_ldl_solve_outf(@Const @ByRef Tensor LD, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean hermitian, @ByRef Tensor out);
+// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor log_softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+@Namespace("at") public static native @ByRef Tensor log_softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim);
+// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor log_softmax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);
+// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor log_softmax(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+@Namespace("at") public static native @ByVal Tensor log_softmax(@Const @ByRef Tensor self, @ByVal Dimname dim);
-// Parsed from ATen/ops/linalg_lstsq.h
+
+// Parsed from ATen/ops/logaddexp.h
 // #pragma once
@@ -49723,23 +35435,21 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_lstsq.h>
+// #include <ATen/ops/logaddexp.h>
-// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
-@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple linalg_lstsq(@Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional rcond, @ByVal(nullValue = "c10::optional<c10::string_view>(c10::nullopt)") @Cast("c10::optional<c10::string_view>*") Pointer driver);
-@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple linalg_lstsq(@Const @ByRef Tensor self, @Const @ByRef Tensor b);
+// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logaddexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other);
+// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logaddexp_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out);
-// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_lstsq_out(@ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values, @Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional rcond, @ByVal(nullValue = "c10::optional<c10::string_view>(c10::nullopt)") @Cast("c10::optional<c10::string_view>*") Pointer driver);
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_lstsq_out(@ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values, @Const @ByRef Tensor self, @Const @ByRef Tensor b);
-// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_lstsq_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor b, @ByVal DoubleOptional rcond, @ByVal @Cast("c10::optional<c10::string_view>*") Pointer driver, @ByRef Tensor solution, @ByRef Tensor residuals, @ByRef Tensor rank, @ByRef Tensor singular_values);
+// aten::logaddexp(Tensor self, Tensor other) -> Tensor
+@Namespace("at") public static native @ByVal Tensor logaddexp(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
-// Parsed from ATen/ops/linalg_lu.h
+// Parsed from ATen/ops/logaddexp2.h
 // #pragma once
@@ -49760,23 +35470,21 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_lu.h>
+// #include <ATen/ops/logaddexp2.h>
-// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple linalg_lu(@Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/);
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple linalg_lu(@Const @ByRef Tensor A);
+// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logaddexp2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other);
+// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logaddexp2_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out);
-// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_lu_out(@ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U, @Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_lu_out(@ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U, @Const @ByRef Tensor A);
-// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_lu_outf(@Const @ByRef Tensor A, @Cast("bool") boolean pivot, @ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U);
+// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
+@Namespace("at") public static native @ByVal Tensor logaddexp2(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
-// Parsed from ATen/ops/linalg_lu_factor.h
+// Parsed from ATen/ops/logcumsumexp.h
 // #pragma once
@@ -49797,23 +35505,29 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_lu_factor.h>
+// #include <ATen/ops/logcumsumexp.h>
-// aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
-@Namespace("at") public static native @ByVal TensorTensorTuple linalg_lu_factor(@Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/);
-@Namespace("at") public static native @ByVal TensorTensorTuple linalg_lu_factor(@Const @ByRef Tensor A);
+// aten::logcumsumexp(Tensor self, int dim) -> Tensor
+@Namespace("at") public static native @ByVal Tensor logcumsumexp(@Const @ByRef Tensor self, @Cast("int64_t") long dim);
-// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer linalg_lu_factor_out(@ByRef Tensor LU, @ByRef Tensor pivots, @Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer linalg_lu_factor_out(@ByRef Tensor LU, @ByRef Tensor pivots, @Const @ByRef Tensor A);
-// aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer linalg_lu_factor_outf(@Const @ByRef Tensor A, @Cast("bool") boolean pivot, @ByRef Tensor LU, @ByRef Tensor pivots);
+// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logcumsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim);
+// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logcumsumexp_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor out);
+
+// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
+@Namespace("at") public static native @ByVal Tensor logcumsumexp(@Const @ByRef Tensor self, @ByVal Dimname dim);
+
+// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logcumsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim);
+// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logcumsumexp_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByRef Tensor out);
-// Parsed from ATen/ops/linalg_lu_factor_ex.h
+// Parsed from ATen/ops/logdet.h
 // #pragma once
@@ -49834,23 +35548,16 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_lu_factor_ex.h>
-
+// #include <ATen/ops/logdet.h>
-// aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple linalg_lu_factor_ex(@Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/, @Cast("bool") boolean check_errors/*=false*/);
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple linalg_lu_factor_ex(@Const @ByRef Tensor A);
-// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_lu_factor_ex_out(@ByRef Tensor LU, @ByRef Tensor pivots, @ByRef Tensor info, @Const @ByRef Tensor A, @Cast("bool") boolean pivot/*=true*/, @Cast("bool") boolean check_errors/*=false*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_lu_factor_ex_out(@ByRef Tensor LU, @ByRef Tensor pivots, @ByRef Tensor info, @Const @ByRef Tensor A);
-// aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&,at::Tensor&>*") PointerPointer linalg_lu_factor_ex_outf(@Const @ByRef Tensor A, @Cast("bool") boolean pivot, @Cast("bool") boolean check_errors, @ByRef Tensor LU, @ByRef Tensor pivots, @ByRef Tensor info);
+// aten::logdet(Tensor self) -> Tensor
+@Namespace("at") public static native @ByVal Tensor logdet(@Const @ByRef Tensor self);
-// Parsed from ATen/ops/linalg_lu_solve.h
+// Parsed from ATen/ops/logical_and.h
 // #pragma once
@@ -49871,23 +35578,21 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_lu_solve.h>
+// #include <ATen/ops/logical_and.h>
-// aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
-@Namespace("at") public static native @ByVal Tensor linalg_lu_solve(@Const @ByRef Tensor LU, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean adjoint/*=false*/);
-@Namespace("at") public static native @ByVal Tensor linalg_lu_solve(@Const @ByRef Tensor LU, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B);
+// aten::logical_and(Tensor self, Tensor other) -> Tensor
+@Namespace("at") public static native @ByVal Tensor logical_and(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
-// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor linalg_lu_solve_out(@ByRef Tensor out, @Const @ByRef Tensor LU, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean adjoint/*=false*/);
-@Namespace("at") public static native @ByRef Tensor linalg_lu_solve_out(@ByRef Tensor out, @Const @ByRef Tensor LU, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B);
-// aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor linalg_lu_solve_outf(@Const @ByRef Tensor LU, @Const @ByRef Tensor pivots, @Const @ByRef Tensor B, @Cast("bool") boolean left, @Cast("bool") boolean adjoint, @ByRef Tensor out);
+// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logical_and_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other);
+// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logical_and_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out);
-// Parsed from ATen/ops/linalg_matmul.h
+// Parsed from ATen/ops/logical_not.h
 // #pragma once
@@ -49908,21 +35613,21 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_matmul.h>
+// #include <ATen/ops/logical_not.h>
-// aten::linalg_matmul(Tensor self, Tensor other) -> Tensor
-@Namespace("at") public static native @ByVal Tensor linalg_matmul(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
+// aten::logical_not(Tensor self) -> Tensor
+@Namespace("at") public static native @ByVal Tensor logical_not(@Const @ByRef Tensor self);
-// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor linalg_matmul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other);
-// aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor linalg_matmul_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out);
+// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logical_not_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logical_not_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
-// Parsed from ATen/ops/linalg_matrix_exp.h
+// Parsed from ATen/ops/logical_or.h
 // #pragma once
@@ -49943,21 +35648,21 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_matrix_exp.h>
+// #include <ATen/ops/logical_or.h>
-// aten::linalg_matrix_exp(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor linalg_matrix_exp(@Const @ByRef Tensor self);
+// aten::logical_or(Tensor self, Tensor other) -> Tensor
+@Namespace("at") public static native @ByVal Tensor logical_or(@Const @ByRef Tensor self, @Const @ByRef Tensor other);
-// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor linalg_matrix_exp_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor linalg_matrix_exp_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
+// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logical_or_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other);
+// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor logical_or_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out);
-// Parsed from ATen/ops/linalg_matrix_norm.h
+// Parsed from ATen/ops/logical_xor.h
 // #pragma once
@@ -49978,39 +35683,21 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include <ATen/ops/linalg_matrix_norm.h>
-
-
-// aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
-@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @Const @ByRef Scalar ord);
-@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
+// #include <ATen/ops/logical_xor.h>
-// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar ord); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor linalg_matrix_norm(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +// aten::logical_xor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor logical_xor(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::string_view(\"fro\")") @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::IntArrayRef({-2,-1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logical_xor_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logical_xor_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_matrix_power.h +// Parsed from ATen/ops/logit.h // #pragma once @@ -50031,21 +35718,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_matrix_power(Tensor self, int n) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_matrix_power(@Const @ByRef Tensor self, @Cast("int64_t") long n); +// aten::logit(Tensor self, float? eps=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor logit(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); +@Namespace("at") public static native @ByVal Tensor logit(@Const @ByRef Tensor self); -// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_matrix_power_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long n); -// aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_matrix_power_outf(@Const @ByRef Tensor self, @Cast("int64_t") long n, @ByRef Tensor out); +// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor logit_(@ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); +@Namespace("at") public static native @ByRef Tensor logit_(@ByRef Tensor self); + +// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logit_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); +@Namespace("at") public static native @ByRef Tensor logit_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logit_outf(@Const @ByRef Tensor self, @ByVal DoubleOptional eps, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_matrix_rank.h +// Parsed from ATen/ops/logit_backward.h // #pragma once @@ -50066,53 +35759,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional atol, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional rtol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor input); - -// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional atol, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional rtol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor input); -// aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional atol, @Const @ByRef TensorOptional rtol, @Cast("bool") boolean hermitian, @ByRef Tensor out); - -// aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol); - -// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol); -// aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_outf(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian, @ByRef Tensor out); - -// aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor self, double tol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor self, double tol); +// #include -// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor self, double tol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor self, double tol); -// aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_outf(@Const @ByRef Tensor self, double tol, @Cast("bool") boolean hermitian, @ByRef Tensor out); -// aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor input, @Const @ByRef Tensor tol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByVal Tensor linalg_matrix_rank(@Const @ByRef Tensor input, @Const @ByRef Tensor tol); +// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logit_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); +@Namespace("at") public static native @ByRef Tensor logit_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logit_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal DoubleOptional eps, @ByRef Tensor grad_input); -// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor tol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor tol); -// aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_matrix_rank_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor tol, @Cast("bool") boolean hermitian, @ByRef Tensor out); +// aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor logit_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); +@Namespace("at") public static native @ByVal Tensor logit_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// Parsed from ATen/ops/linalg_multi_dot.h +// Parsed from ATen/ops/logspace.h // #pragma once @@ -50133,21 +35796,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_multi_dot(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_multi_dot(@ByVal TensorArrayRef tensors); +// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_multi_dot_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_multi_dot_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); +// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/); +@Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor logspace_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_norm.h +// Parsed from ATen/ops/logsumexp.h // #pragma once @@ -50168,39 +35835,38 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord); -@Namespace("at") public static native @ByVal Tensor linalg_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logsumexp_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor logsumexp_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional ord, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord); -@Namespace("at") public static native @ByRef Tensor linalg_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor linalg_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor logsumexp_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_pinv.h +// Parsed from ATen/ops/lshift.h // #pragma once @@ -50221,53 +35887,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional atol, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional rtol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self); - -// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) 
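The logsumexp group above shows the three generated spellings of an int[1] dim parameter: LongArrayRef, long[] plus keepdim, and a long... convenience overload. A sketch using the latter two, with x a Tensor prepared elsewhere:

    Tensor a = logsumexp(x, new long[]{1}, true); // explicit dims, keepdim=true
    Tensor b = logsumexp(x, 1);                   // varargs form, keepdim defaults to false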
-@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional atol, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional rtol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_pinv_outf(@Const @ByRef Tensor self, @Const @ByRef TensorOptional atol, @Const @ByRef TensorOptional rtol, @Cast("bool") boolean hermitian, @ByRef Tensor out); - -// aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol); +// #include -// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol); -// aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_pinv_outf(@Const @ByRef Tensor self, @ByVal DoubleOptional atol, @ByVal DoubleOptional rtol, @Cast("bool") boolean hermitian, @ByRef Tensor out); -// aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, double rcond, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, double rcond); +// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor __lshift__(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, @Const @ByRef Tensor rcond, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByVal Tensor linalg_pinv(@Const @ByRef Tensor self, @Const @ByRef Tensor rcond); +// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor __lshift__(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, double rcond, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, double rcond); -// aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_pinv_outf(@Const @ByRef Tensor self, double rcond, @Cast("bool") boolean hermitian, @ByRef Tensor out); +// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor __lshift___out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor __lshift___outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor rcond, @Cast("bool") boolean hermitian/*=false*/); -@Namespace("at") public static native @ByRef Tensor linalg_pinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor rcond); -// aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_pinv_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor rcond, @Cast("bool") boolean hermitian, @ByRef Tensor out); +// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor __lshift___out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor __lshift___outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_qr.h +// Parsed from ATen/ops/lstm.h // #pragma once @@ -50288,23 +35930,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R) -@Namespace("at") public static native @ByVal TensorTensorTuple linalg_qr(@Const @ByRef Tensor A, @ByVal(nullValue = "c10::string_view(\"reduced\")") @Cast("c10::string_view*") Pointer mode); -@Namespace("at") public static native @ByVal TensorTensorTuple linalg_qr(@Const @ByRef Tensor A); +// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lstm(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); -// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) 
R) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor A, @ByVal(nullValue = "c10::string_view(\"reduced\")") @Cast("c10::string_view*") Pointer mode); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor A); -// aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_qr_outf(@Const @ByRef Tensor A, @ByVal @Cast("c10::string_view*") Pointer mode, @ByRef Tensor Q, @ByRef Tensor R); +// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lstm(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); -// Parsed from ATen/ops/linalg_slogdet.h +// Parsed from ATen/ops/lstm_cell.h // #pragma once @@ -50325,21 +35963,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet) -@Namespace("at") public static native @ByVal TensorTensorTuple linalg_slogdet(@Const @ByRef Tensor A); -// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_slogdet_out(@ByRef Tensor sign, @ByRef Tensor logabsdet, @Const @ByRef Tensor A); -// aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_slogdet_outf(@Const @ByRef Tensor A, @ByRef Tensor sign, @ByRef Tensor logabsdet); +// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T lstm_cell(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_ih, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_hh); +@Namespace("at") public static native @ByVal T_TensorTensor_T lstm_cell(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh); -// Parsed from ATen/ops/linalg_solve.h +// Parsed from ATen/ops/lstm_mps_backward.h // #pragma once @@ -50360,23 +35994,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_solve(@Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/); -@Namespace("at") public static native @ByVal Tensor linalg_solve(@Const @ByRef Tensor A, @Const @ByRef Tensor B); +// aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? 
grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) +@Namespace("at") public static native @ByVal T_TensorTensorVectorTensorVector_T lstm_mps_backward(@Const @ByRef Tensor grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); -// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_solve_out(@ByRef Tensor out, @Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/); -@Namespace("at") public static native @ByRef Tensor linalg_solve_out(@ByRef Tensor out, @Const @ByRef Tensor A, @Const @ByRef Tensor B); -// aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_solve_outf(@Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left, @ByRef Tensor out); +// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () +@Namespace("at") public static native void lstm_mps_backward_out(@ByRef Tensor out0, @ByVal @Cast("at::TensorList*") TensorArrayRef out1, @ByVal @Cast("at::TensorList*") TensorArrayRef out2, @Const @ByRef Tensor grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) 
out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () +@Namespace("at") public static native void lstm_mps_backward_outf(@Const @ByRef Tensor grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @ByRef Tensor out0, @ByVal @Cast("at::TensorList*") TensorArrayRef out1, @ByVal @Cast("at::TensorList*") TensorArrayRef out2); -// Parsed from ATen/ops/linalg_solve_ex.h +// Parsed from ATen/ops/lt.h // #pragma once @@ -50397,23 +36029,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info) -@Namespace("at") public static native @ByVal TensorTensorTuple linalg_solve_ex(@Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean check_errors/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple linalg_solve_ex(@Const @ByRef Tensor A, @Const @ByRef Tensor B); +// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lt_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lt_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_solve_ex_out(@ByRef Tensor result, @ByRef Tensor info, @Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean check_errors/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_solve_ex_out(@ByRef Tensor result, @ByRef Tensor info, @Const @ByRef Tensor A, @Const @ByRef Tensor B); -// aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_solve_ex_outf(@Const @ByRef Tensor A, @Const @ByRef Tensor B, @Cast("bool") boolean left, @Cast("bool") boolean check_errors, @ByRef Tensor result, @ByRef Tensor info); +// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor lt(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lt_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
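The lstm bindings above return the renamed tuple wrapper T_TensorTensorTensor_T in place of the old TensorTensorTensorTuple. A sketch of unpacking the (output, h_n, c_n) triple, assuming input, hx, and params are prepared elsewhere and that the generated tuple accessors are named get0()/get1()/get2() (an assumption about these presets):

    T_TensorTensorTensor_T r = lstm(input, hx, params,
            /*has_biases=*/true, /*num_layers=*/1, /*dropout=*/0.0,
            /*train=*/false, /*bidirectional=*/false, /*batch_first=*/true);
    Tensor output = r.get0(), hn = r.get1(), cn = r.get2();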
+@Namespace("at") public static native @ByRef Tensor lt_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor lt(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/linalg_solve_triangular.h + +// Parsed from ATen/ops/lu_solve.h // #pragma once @@ -50434,23 +36072,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_solve_triangular_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor B, @Cast("bool") boolean upper, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean unitriangular/*=false*/); -@Namespace("at") public static native @ByRef Tensor linalg_solve_triangular_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor B, @Cast("bool") boolean upper); -// aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_solve_triangular_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor B, @Cast("bool") boolean upper, @Cast("bool") boolean left, @Cast("bool") boolean unitriangular, @ByRef Tensor out); -// aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_solve_triangular(@Const @ByRef Tensor self, @Const @ByRef Tensor B, @Cast("bool") boolean upper, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean unitriangular/*=false*/); -@Namespace("at") public static native @ByVal Tensor linalg_solve_triangular(@Const @ByRef Tensor self, @Const @ByRef Tensor B, @Cast("bool") boolean upper); +// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lu_solve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots); +// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor lu_solve_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots, @ByRef Tensor out); +// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor +@Namespace("at") public static native @ByVal Tensor lu_solve(@Const @ByRef Tensor self, @Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots); -// Parsed from ATen/ops/linalg_svd.h + +// Parsed from ATen/ops/lu_unpack.h // #pragma once @@ -50471,23 +36107,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? 
driver=None) -> (Tensor U, Tensor S, Tensor Vh) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple linalg_svd(@Const @ByRef Tensor A, @Cast("bool") boolean full_matrices/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple linalg_svd(@Const @ByRef Tensor A); +// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lu_unpack(@Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots, @Cast("bool") boolean unpack_data/*=true*/, @Cast("bool") boolean unpack_pivots/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lu_unpack(@Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots); -// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh, @Const @ByRef Tensor A, @Cast("bool") boolean full_matrices/*=true*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh, @Const @ByRef Tensor A); -// aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linalg_svd_outf(@Const @ByRef Tensor A, @Cast("bool") boolean full_matrices, @ByVal @Cast("c10::optional*") Pointer driver, @ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor Vh); +// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lu_unpack_out(@ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U, @Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots, @Cast("bool") boolean unpack_data/*=true*/, @Cast("bool") boolean unpack_pivots/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lu_unpack_out(@ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U, @Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots); +// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T lu_unpack_outf(@Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots, @Cast("bool") boolean unpack_data, @Cast("bool") boolean unpack_pivots, @ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U); -// Parsed from ATen/ops/linalg_svdvals.h +// Parsed from ATen/ops/mH.h // #pragma once @@ -50508,23 +36144,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::linalg_svdvals(Tensor A, *, str? 
driver=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_svdvals(@Const @ByRef Tensor A, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); -@Namespace("at") public static native @ByVal Tensor linalg_svdvals(@Const @ByRef Tensor A); -// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_svdvals_out(@ByRef Tensor out, @Const @ByRef Tensor A, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer driver); -@Namespace("at") public static native @ByRef Tensor linalg_svdvals_out(@ByRef Tensor out, @Const @ByRef Tensor A); -// aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_svdvals_outf(@Const @ByRef Tensor A, @ByVal @Cast("c10::optional*") Pointer driver, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_tensorinv.h +// Parsed from ATen/ops/mT.h // #pragma once @@ -50545,23 +36172,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_tensorinv(@Const @ByRef Tensor self, @Cast("int64_t") long ind/*=2*/); -@Namespace("at") public static native @ByVal Tensor linalg_tensorinv(@Const @ByRef Tensor self); -// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_tensorinv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long ind/*=2*/); -@Namespace("at") public static native @ByRef Tensor linalg_tensorinv_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_tensorinv_outf(@Const @ByRef Tensor self, @Cast("int64_t") long ind, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_tensorsolve.h +// Parsed from ATen/ops/margin_ranking_loss.h // #pragma once @@ -50582,26 +36200,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_tensorsolve(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dims); -@Namespace("at") public static native @ByVal Tensor linalg_tensorsolve(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -@Namespace("at") public static native @ByVal Tensor linalg_tensorsolve(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor linalg_tensorsolve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dims); -@Namespace("at") public static native @ByRef Tensor linalg_tensorsolve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -@Namespace("at") public static native @ByRef Tensor linalg_tensorsolve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_tensorsolve_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal LongArrayRefOptional dims, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor linalg_tensorsolve_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); +// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor margin_ranking_loss(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor target, double margin/*=0.0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor margin_ranking_loss(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor target); -// Parsed from ATen/ops/linalg_vander.h +// Parsed from ATen/ops/masked_fill.h // #pragma once @@ -50622,17 +36231,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_vander(@Const @ByRef Tensor x, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional N); -@Namespace("at") public static native @ByVal Tensor linalg_vander(@Const @ByRef Tensor x); +// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor +@Namespace("at") public static native @ByVal Tensor masked_fill(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Scalar value); +// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor +@Namespace("at") public static native @ByVal Tensor masked_fill(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor value); + +// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor masked_fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Scalar value); +// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor masked_fill_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Scalar value, @ByRef Tensor out); +// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor masked_fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor value); +// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor masked_fill_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor value, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_vecdot.h + + +// Parsed from ATen/ops/masked_scatter.h // #pragma once @@ -50653,23 +36274,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_vecdot(@Const @ByRef Tensor x, @Const @ByRef Tensor y, @Cast("int64_t") long dim/*=-1*/); -@Namespace("at") public static native @ByVal Tensor linalg_vecdot(@Const @ByRef Tensor x, @Const @ByRef Tensor y); +// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor +@Namespace("at") public static native @ByVal Tensor masked_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor source); -// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_vecdot_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor y, @Cast("int64_t") long dim/*=-1*/); -@Namespace("at") public static native @ByRef Tensor linalg_vecdot_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor y); -// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_vecdot_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor y, @Cast("int64_t") long dim, @ByRef Tensor out); +// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor masked_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor source); +// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor masked_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor source, @ByRef Tensor out); -// Parsed from ATen/ops/linalg_vector_norm.h +// Parsed from ATen/ops/masked_select.h // #pragma once @@ -50690,26 +36309,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linalg_vector_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor linalg_vector_norm(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor linalg_vector_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor masked_select_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask); +// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor masked_select_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @ByRef Tensor out); -// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linalg_vector_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor linalg_vector_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor linalg_vector_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar ord, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
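masked_select above copies the elements of self where a boolean mask holds true and always yields a flattened 1-D result; the mask must be a bool tensor broadcastable to self. Sketch, inputs assumed from elsewhere:

    Tensor picked = masked_select(self, mask); // 1-D, length = number of true mask entries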
-@Namespace("at") public static native @ByRef Tensor linalg_vector_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor linalg_vector_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar ord, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::masked_select(Tensor self, Tensor mask) -> Tensor +@Namespace("at") public static native @ByVal Tensor masked_select(@Const @ByRef Tensor self, @Const @ByRef Tensor mask); -// Parsed from ATen/ops/linear.h +// Parsed from ATen/ops/masked_select_backward.h // #pragma once @@ -50730,23 +36344,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linear(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias); -@Namespace("at") public static native @ByVal Tensor linear(@Const @ByRef Tensor input, @Const @ByRef Tensor weight); -// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linear_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias); -@Namespace("at") public static native @ByRef Tensor linear_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef Tensor weight); -// aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linear_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByRef Tensor out); +// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor +@Namespace("at") public static native @ByVal Tensor masked_select_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input, @Const @ByRef Tensor mask); -// Parsed from ATen/ops/linear_backward.h +// Parsed from ATen/ops/matmul.h // #pragma once @@ -50767,21 +36374,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple linear_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::matmul(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor matmul(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linear_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer linear_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor matmul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor matmul_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/linspace.h +// Parsed from ATen/ops/matmul_backward.h // #pragma once @@ -50802,24 +36409,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); -// aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T matmul_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("std::array*") BoolPointer mask); -// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); -// aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor linspace_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByRef Tensor out); +// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T matmul_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("std::array*") BoolPointer mask); +// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T matmul_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("std::array*") BoolPointer mask, @ByRef Tensor out0, @ByRef Tensor out1); -// Parsed from ATen/ops/log.h +// Parsed from ATen/ops/matrix_H.h // #pragma once @@ -50840,24 +36444,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::log(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor log(@Const @ByRef Tensor self); +// #include -// aten::log_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log_(@ByRef Tensor self); -// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/log10.h +// Parsed from ATen/ops/matrix_exp.h // #pragma once @@ -50878,24 +36472,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::log10(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor log10(@Const @ByRef Tensor self); +// #include -// aten::log10_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log10_(@ByRef Tensor self); -// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log10_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log10_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::matrix_exp(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor matrix_exp(@Const @ByRef Tensor self); -// Parsed from ATen/ops/log1p.h +// Parsed from ATen/ops/matrix_exp_backward.h // #pragma once @@ -50916,24 +36502,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::log1p(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor log1p(@Const @ByRef Tensor self); +// #include -// aten::log1p_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log1p_(@ByRef Tensor self); -// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log1p_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
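matrix_exp above computes the matrix exponential of a square matrix, applied batch-wise over any leading dimensions. Sketch, with a assumed to be a square float or double tensor from elsewhere:

    Tensor e = matrix_exp(a); // exp(a) in the matrix sense; same shape as a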
-@Namespace("at") public static native @ByRef Tensor log1p_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor +@Namespace("at") public static native @ByVal Tensor matrix_exp_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad); -// Parsed from ATen/ops/log2.h +// Parsed from ATen/ops/matrix_power.h // #pragma once @@ -50954,24 +36532,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::log2(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor log2(@Const @ByRef Tensor self); -// aten::log2_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log2_(@ByRef Tensor self); +// aten::matrix_power(Tensor self, int n) -> Tensor +@Namespace("at") public static native @ByVal Tensor matrix_power(@Const @ByRef Tensor self, @Cast("int64_t") long n); -// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log2_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor matrix_power_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long n); +// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor matrix_power_outf(@Const @ByRef Tensor self, @Cast("int64_t") long n, @ByRef Tensor out); -// Parsed from ATen/ops/log_normal.h +// Parsed from ATen/ops/max.h // #pragma once @@ -50992,23 +36567,49 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log_normal_out(@ByRef Tensor out, @Const @ByRef Tensor self, double mean/*=1*/, double std/*=2*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor log_normal_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log_normal_outf(@Const @ByRef Tensor self, double mean, double std, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T max(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor log_normal(@Const @ByRef Tensor self, double mean/*=1*/, double std/*=2*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor log_normal(@Const @ByRef Tensor self); +// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) 
values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_out(@ByRef Tensor max, @ByRef Tensor max_values, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_out(@ByRef Tensor max, @ByRef Tensor max_values, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor max, @ByRef Tensor max_values); +// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T max(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max(@Const @ByRef Tensor self, @ByVal Dimname dim); + +// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_out(@ByRef Tensor max, @ByRef Tensor max_values, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_out(@ByRef Tensor max, @ByRef Tensor max_values, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor max, @ByRef Tensor max_values); +// aten::max(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor max(@Const @ByRef Tensor self); + +// aten::max.other(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor max(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/log_sigmoid.h + + +// Parsed from ATen/ops/max_pool1d.h // #pragma once @@ -51029,21 +36630,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log_sigmoid_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
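Like matmul_backward, the dim-reducing max overloads above now hand back T_TensorTensor_T pairs. A fragment, under the same import and accessor assumptions as the sketch above (zeros is likewise an assumed factory overload):

    Tensor t = rand(4, 5);
    Tensor global = max(t);                          // aten::max(Tensor) -> Tensor
    T_TensorTensor_T rowMax = max(t, /*dim=*/1, /*keepdim=*/false);
    Tensor values  = rowMax.get0();                  // per-row maxima, shape [4]
    Tensor indices = rowMax.get1();                  // argmax positions along dim 1
    Tensor clamped = max(t, zeros(4, 5));            // aten::max.other, elementwise maximum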
-@Namespace("at") public static native @ByRef Tensor log_sigmoid_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::log_sigmoid(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor log_sigmoid(@Const @ByRef Tensor self); +// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// Parsed from ATen/ops/log_sigmoid_backward.h +// Parsed from ATen/ops/max_pool1d_with_indices.h // #pragma once @@ -51064,21 +36663,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log_sigmoid_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor buffer); -// aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) 
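The pooling bindings are generated twice, once over LongArrayRef and once over plain long[] with a trailing long... convenience overload, so kernel sizes can be passed as varargs from Java. A fragment using the max_pool1d overloads exactly as declared above (imports assumed as before):

    Tensor x = rand(1, 3, 16);                       // (N, C, L)
    Tensor pooled1d = max_pool1d(x, 2);              // kernel_size only, long... overload
    Tensor strided  = max_pool1d(x, new long[]{2}, new long[]{2},
                                 new long[]{0}, new long[]{1}, /*ceil_mode=*/false);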
-@Namespace("at") public static native @ByRef Tensor log_sigmoid_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor buffer, @ByRef Tensor grad_input); -// aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor -@Namespace("at") public static native @ByVal Tensor log_sigmoid_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor buffer); +// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// Parsed from ATen/ops/log_sigmoid_forward.h +// Parsed from ATen/ops/max_pool2d.h // #pragma once @@ -51099,21 +36696,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer log_sigmoid_forward_out(@ByRef Tensor output, @ByRef Tensor buffer, @Const @ByRef Tensor self); -// aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) 
buffer) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer log_sigmoid_forward_outf(@Const @ByRef Tensor self, @ByRef Tensor output, @ByRef Tensor buffer); -// aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer) -@Namespace("at") public static native @ByVal TensorTensorTuple log_sigmoid_forward(@Const @ByRef Tensor self); +// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// Parsed from ATen/ops/log_softmax.h +// Parsed from ATen/ops/max_pool2d_backward.h // #pragma once @@ -51134,27 +36729,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor log_softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor log_softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) 
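The 2d variant follows the same pattern; a short fragment under the same assumptions:

    Tensor img = rand(1, 3, 32, 32);                 // (N, C, H, W)
    Tensor out  = max_pool2d(img, 2, 2);             // 2x2 kernel via the long... overload
    Tensor out2 = max_pool2d(img, new long[]{3, 3}, new long[]{2, 2},
                             new long[]{1, 1}, new long[]{1, 1}, /*ceil_mode=*/false);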
-@Namespace("at") public static native @ByRef Tensor log_softmax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor log_softmax(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor log_softmax(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -// Parsed from ATen/ops/logaddexp.h +// Parsed from ATen/ops/max_pool2d_with_indices.h // #pragma once @@ -51175,21 +36771,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logaddexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logaddexp_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); -// aten::logaddexp(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor logaddexp(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native 
@ByVal T_TensorTensor_T max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// Parsed from ATen/ops/logaddexp2.h +// Parsed from ATen/ops/max_pool2d_with_indices_backward.h // #pragma once @@ -51210,21 +36813,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logaddexp2_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logaddexp2_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -// aten::logaddexp2(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor logaddexp2(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor +@Namespace("at") public static native @ByVal Tensor max_pool2d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByVal Tensor max_pool2d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -// Parsed from ATen/ops/logcumsumexp.h +// Parsed from ATen/ops/max_pool3d.h // #pragma once @@ -51245,29 +36851,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::logcumsumexp(Tensor self, int dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor logcumsumexp(@Const @ByRef Tensor self, @Cast("int64_t") long dim); - -// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logcumsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logcumsumexp_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor out); +// #include -// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor logcumsumexp(@Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logcumsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) 
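A fragment pairing max_pool2d_with_indices with its explicit backward, reusing img from the previous fragment; ones_like is an assumed factory binding here, not something this hunk declares:

    T_TensorTensor_T fwd = max_pool2d_with_indices(img, 2, 2);
    Tensor pooled  = fwd.get0();                     // pooled values, shape [1, 3, 16, 16]
    Tensor indices = fwd.get1();                     // argmax positions per pooling window
    Tensor gradOut = ones_like(pooled);              // assumed helper: all-ones upstream gradient
    Tensor gradIn  = max_pool2d_with_indices_backward(
            gradOut, img,
            new long[]{2, 2}, new long[]{2, 2},      // kernel_size, stride
            new long[]{0, 0}, new long[]{1, 1},      // padding, dilation
            /*ceil_mode=*/false, indices);           // gradIn has the shape of img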
-@Namespace("at") public static native @ByRef Tensor logcumsumexp_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByRef Tensor out); +// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// Parsed from ATen/ops/logdet.h +// Parsed from ATen/ops/max_pool3d_with_indices.h // #pragma once @@ -51288,16 +36884,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logdet(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor logdet(@Const @ByRef Tensor self); +// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); + +// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, 
@ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// Parsed from ATen/ops/logical_and.h +// Parsed from ATen/ops/max_pool3d_with_indices_backward.h // #pragma once @@ -51318,21 +36926,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logical_and(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor logical_and(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor logical_and_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logical_and_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor +@Namespace("at") public static native @ByVal Tensor max_pool3d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +@Namespace("at") public static native @ByVal Tensor max_pool3d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -// Parsed from ATen/ops/logical_not.h +// Parsed from ATen/ops/max_unpool2d.h // #pragma once @@ -51353,21 +36964,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logical_not(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor logical_not(@Const @ByRef Tensor self); +// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_unpool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByRef Tensor max_unpool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_unpool2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor max_unpool2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); -// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logical_not_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor logical_not_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor +@Namespace("at") public static native @ByVal Tensor max_unpool2d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByVal Tensor max_unpool2d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// Parsed from ATen/ops/logical_or.h +// Parsed from ATen/ops/max_unpool3d.h // #pragma once @@ -51388,21 +37002,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logical_or(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor logical_or(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_unpool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor max_unpool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor max_unpool3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor max_unpool3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); -// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logical_or_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
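max_unpool2d inverts an indexed pooling by scattering the pooled values back to their argmax positions; continuing from the pooling fragment above:

    Tensor restored = max_unpool2d(pooled, indices, 32, 32);  // zeros except at argmax positions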
-@Namespace("at") public static native @ByRef Tensor logical_or_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor max_unpool3d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongArrayRef output_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor max_unpool3d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// Parsed from ATen/ops/logical_xor.h +// Parsed from ATen/ops/maximum.h // #pragma once @@ -51423,21 +37040,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logical_xor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor logical_xor(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::maximum(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor maximum(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logical_xor_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logical_xor_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor maximum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor maximum_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/logit.h +// Parsed from ATen/ops/mean.h // #pragma once @@ -51458,27 +37075,42 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logit(Tensor self, float? eps=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor logit(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); -@Namespace("at") public static native @ByVal Tensor logit(@Const @ByRef Tensor self); +// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self); -// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logit_(@ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); -@Namespace("at") public static native @ByRef Tensor logit_(@ByRef Tensor self); +// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); +@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logit_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); -@Namespace("at") public static native @ByRef Tensor logit_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logit_outf(@Const @ByRef Tensor self, @ByVal DoubleOptional eps, @ByRef Tensor out); +// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); +@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); + +// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); + +// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mean_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/logit_backward.h +// Parsed from ATen/ops/median.h // #pragma once @@ -51499,23 +37131,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logit_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); -@Namespace("at") public static native @ByRef Tensor logit_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logit_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal DoubleOptional eps, @ByRef Tensor grad_input); +// aten::median(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor median(@Const @ByRef Tensor self); + +// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T median(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T median(@Const @ByRef Tensor self, @Cast("int64_t") long dim); + +// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T median_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T median_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T median_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); + +// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T median(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T median(@Const @ByRef Tensor self, @ByVal Dimname dim); + +// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T median_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T median_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T median_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); -// aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor logit_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); -@Namespace("at") public static native @ByVal Tensor logit_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor median_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor median_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/logspace.h +// Parsed from ATen/ops/meshgrid.h // #pragma once @@ -51536,25 +37186,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); -// aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::meshgrid(Tensor[] tensors) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/); -@Namespace("at") public static native @ByRef Tensor logspace_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); -// aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logspace_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base, @ByRef Tensor out); +// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByVal @Cast("c10::string_view*") Pointer indexing); -// Parsed from ATen/ops/logsumexp.h +// Parsed from ATen/ops/min.h // #pragma once @@ -51575,38 +37219,44 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T min(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T min(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
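The median and min hunks in this stretch illustrate one systematic change in this refactor: multi-output ops now return generated tuple wrappers such as T_TensorTensor_T, replacing the old *Tuple classes (the removed lstm hunks further down still show the former TensorTensorTuple spelling). A minimal usage sketch, assuming the get0()/get1() accessors these generated wrappers expose and the usual static import of org.bytedeco.pytorch.global.torch:

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class MedianTupleExample {
    public static void main(String[] args) {
        Tensor t = randn(2, 3);                         // 2x3 random test tensor (long... overload)
        T_TensorTensor_T r = median(t, /*dim=*/1, /*keepdim=*/false);
        Tensor values = r.get0();                       // per-row medians, shape [2]
        Tensor indices = r.get1();                      // their positions along dim 1
    }
}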
-@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logsumexp_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor logsumexp_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T min_out(@ByRef Tensor min, @ByRef Tensor min_indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T min_out(@ByRef Tensor min, @ByRef Tensor min_indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T min_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor min, @ByRef Tensor min_indices); -// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor logsumexp(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T min(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T min(@Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor logsumexp_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T min_out(@ByRef Tensor min, @ByRef Tensor min_indices, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T min_out(@ByRef Tensor min, @ByRef Tensor min_indices, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T min_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor min, @ByRef Tensor min_indices); + +// aten::min(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor min(@Const @ByRef Tensor self); + +// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor min_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor min_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::min.other(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor min(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/lshift.h +// Parsed from ATen/ops/minimum.h // #pragma once @@ -51627,29 +37277,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor __lshift__(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// #include -// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor __lshift__(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor __lshift___out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor __lshift___outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// aten::minimum(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor minimum(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor __lshift___out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor __lshift___outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor minimum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor minimum_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/lstm.h +// Parsed from ATen/ops/miopen_batch_norm.h // #pragma once @@ -51670,19 +37312,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple lstm(@Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T miopen_batch_norm(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon); -// aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple lstm(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); +// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T miopen_batch_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon); +// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T miopen_batch_norm_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/lstm_cell.h +// Parsed from ATen/ops/miopen_batch_norm_backward.h // #pragma once @@ -51703,17 +37347,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple lstm_cell(@Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_ih, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_hh); -@Namespace("at") public static native @ByVal TensorTensorTuple lstm_cell(@Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh); +// aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T miopen_batch_norm_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon); +// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T miopen_batch_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon); +// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T miopen_batch_norm_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/lstm_mps_backward.h + +// Parsed from ATen/ops/miopen_convolution.h // #pragma once @@ -51734,21 +37382,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? 
grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) -@Namespace("at") public static native @ByVal TensorTensorVectorTensorVectorTuple lstm_mps_backward(@Const @ByRef Tensor grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () -@Namespace("at") public static native void lstm_mps_backward_out(@ByRef Tensor out0, @ByVal TensorArrayRef out1, @ByVal TensorArrayRef out2, @Const @ByRef Tensor grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); -// aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor layersOutputs, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) 
out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> () -@Namespace("at") public static native void lstm_mps_backward_outf(@Const @ByRef Tensor grad_y, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Const @ByRef Tensor z_state, @Const @ByRef Tensor cell_state_fwd, @Const @ByRef Tensor input, @Const @ByRef Tensor layersOutputs, @ByVal TensorArrayRef hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @ByRef Tensor out0, @ByVal TensorArrayRef out1, @ByVal TensorArrayRef out2); +// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); + + +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); + + +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
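As with the other int[]-taking ops in this file, miopen_convolution is generated twice per signature, once with LongArrayRef and once with a plain long[] overload, plus _symint twins taking SymIntArrayRef for symbolic shapes. The sketch below is illustrative only: miopen_* kernels require a ROCm/MIOpen build of libtorch, and the no-argument TensorOptional() standing in for an absent bias is an assumption of this example:

// Same static imports as the earlier examples; will only run on a ROCm/MIOpen build
Tensor x = randn(1, 3, 32, 32);           // NCHW input
Tensor w = randn(8, 3, 3, 3);             // (out_channels, in_channels, kH, kW)
Tensor y = miopen_convolution(x, w, new TensorOptional(),  // Tensor? bias = None
        new long[]{1, 1},                 // padding
        new long[]{1, 1},                 // stride
        new long[]{1, 1},                 // dilation
        /*groups=*/1, /*benchmark=*/false, /*deterministic=*/false);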
+@Namespace("at") public static native @ByRef Tensor miopen_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); + + +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); + + +// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -// Parsed from ATen/ops/lt.h + + +// Parsed from ATen/ops/miopen_convolution_add_relu.h // #pragma once @@ -51769,29 +37439,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor lt_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor lt_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); - -// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor lt(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// #include -// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor lt_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor lt_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor lt(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor miopen_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); -// Parsed from ATen/ops/lu_solve.h +// Parsed from ATen/ops/miopen_convolution_relu.h // #pragma once @@ -51812,21 +37470,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor lu_solve_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots); -// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor lu_solve_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots, @ByRef Tensor out); -// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor -@Namespace("at") public static native @ByVal Tensor lu_solve(@Const @ByRef Tensor self, @Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots); +// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor miopen_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); -// Parsed from ATen/ops/lu_unpack.h +// Parsed from ATen/ops/miopen_convolution_transpose.h // #pragma once @@ -51847,23 +37501,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple lu_unpack(@Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots, @Cast("bool") boolean unpack_data/*=true*/, @Cast("bool") boolean unpack_pivots/*=true*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple lu_unpack(@Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots); +// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) 
U) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer lu_unpack_out(@ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U, @Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots, @Cast("bool") boolean unpack_data/*=true*/, @Cast("bool") boolean unpack_pivots/*=true*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer lu_unpack_out(@ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U, @Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots); -// aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer lu_unpack_outf(@Const @ByRef Tensor LU_data, @Const @ByRef Tensor LU_pivots, @Cast("bool") boolean unpack_data, @Cast("bool") boolean unpack_pivots, @ByRef Tensor P, @ByRef Tensor L, @ByRef Tensor U); +// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -// Parsed from ATen/ops/mH.h + +// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); + + +// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); + + +// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/miopen_depthwise_convolution.h // #pragma once @@ -51884,14 +37558,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor +@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); + + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); + + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); + + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
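miopen_depthwise_convolution follows the same overload scheme; the depthwise case additionally constrains groups to the input channel count and the weight to shape (C, 1, kH, kW). Again a ROCm-only sketch, under the same assumptions as the convolution example above:

Tensor x = randn(1, 8, 16, 16);           // 8-channel NCHW input
Tensor w = randn(8, 1, 3, 3);             // one 3x3 filter per input channel
Tensor y = miopen_depthwise_convolution(x, w, new TensorOptional(),
        new long[]{1, 1}, new long[]{1, 1}, new long[]{1, 1},
        /*groups=*/8, /*benchmark=*/false, /*deterministic=*/false);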
+@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); + + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -// Parsed from ATen/ops/mT.h + + +// Parsed from ATen/ops/miopen_rnn.h // #pragma once @@ -51912,14 +37615,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state); +// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state); +// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) 
out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_outf(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensor_T miopen_rnn_outf(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); -// Parsed from ATen/ops/margin_ranking_loss.h +// Parsed from ATen/ops/miopen_rnn_backward.h // #pragma once @@ -51940,17 +37653,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor margin_ranking_loss(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor target, double margin/*=0.0*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor margin_ranking_loss(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor target); +// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorVector_T miopen_rnn_backward(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorVector_T miopen_rnn_backward(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2, Tensor(d!)[] out3) -> () +@Namespace("at") public static native void miopen_rnn_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal @Cast("at::TensorList*") TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native void miopen_rnn_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal @Cast("at::TensorList*") TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2, Tensor(d!)[] out3) -> ()
+@Namespace("at") public static native void miopen_rnn_backward_outf(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal @Cast("at::TensorList*") TensorArrayRef out3);
+@Namespace("at") public static native void miopen_rnn_backward_outf(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal @Cast("at::TensorList*") TensorArrayRef out3);

-// Parsed from ATen/ops/masked_fill.h
+
+// Parsed from ATen/ops/mish.h

// #pragma once

@@ -51971,29 +37691,24 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
+// #include

-// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
-@Namespace("at") public static native @ByVal Tensor masked_fill(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Scalar value);
-// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
-@Namespace("at") public static native @ByVal Tensor masked_fill(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor value);
+// aten::mish(Tensor self) -> Tensor
+@Namespace("at") public static native @ByVal Tensor mish(@Const @ByRef Tensor self);

-// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor masked_fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Scalar value);
-// aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor masked_fill_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Scalar value, @ByRef Tensor out);
+// aten::mish_(Tensor(a!) self) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor mish_(@ByRef Tensor self);

-// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor masked_fill_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor value);
-// aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor masked_fill_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor value, @ByRef Tensor out);
+// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor mish_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor mish_outf(@Const @ByRef Tensor self, @ByRef Tensor out);

-// Parsed from ATen/ops/masked_scatter.h
+// Parsed from ATen/ops/mish_backward.h

// #pragma once

@@ -52014,21 +37729,16 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
+// #include

-// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
-@Namespace("at") public static native @ByVal Tensor masked_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor source);
-// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor masked_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor source);
-// aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor masked_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @Const @ByRef Tensor source, @ByRef Tensor out);
+// aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor
+@Namespace("at") public static native @ByVal Tensor mish_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self);

-// Parsed from ATen/ops/masked_select.h
+// Parsed from ATen/ops/mkldnn_adaptive_avg_pool2d.h

// #pragma once

@@ -52049,21 +37759,24 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
+// #include

-// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor masked_select_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask);
-// aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor masked_select_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @ByRef Tensor out);
+// aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor
+@Namespace("at") public static native @ByVal Tensor mkldnn_adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size);
+@Namespace("at") public static native @ByVal Tensor mkldnn_adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);

-// aten::masked_select(Tensor self, Tensor mask) -> Tensor
-@Namespace("at") public static native @ByVal Tensor masked_select(@Const @ByRef Tensor self, @Const @ByRef Tensor mask);
+// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); -// Parsed from ATen/ops/masked_select_backward.h +// Parsed from ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h // #pragma once @@ -52084,16 +37797,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor -@Namespace("at") public static native @ByVal Tensor masked_select_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input, @Const @ByRef Tensor mask); +// aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_adaptive_avg_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); + +// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/matmul.h +// Parsed from ATen/ops/mkldnn_convolution.h // #pragma once @@ -52114,21 +37832,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::matmul(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor matmul(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor mkldnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); -// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor matmul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor matmul_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByVal Tensor mkldnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); + + +// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); + + +// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); + + +// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups); +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); + + +// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -// Parsed from ATen/ops/matmul_backward.h + +// Parsed from ATen/ops/mkldnn_linear.h // #pragma once @@ -52149,21 +37889,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple matmul_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("std::array*") BoolPointer mask); +// aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_linear(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias); +@Namespace("at") public static native @ByVal Tensor mkldnn_linear(@Const @ByRef Tensor self, @Const @ByRef Tensor weight); -// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer matmul_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("std::array*") BoolPointer mask); -// aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer matmul_backward_outf(@Const @ByRef Tensor grad, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("std::array*") BoolPointer mask, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor mkldnn_linear_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias); +@Namespace("at") public static native @ByRef Tensor mkldnn_linear_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight); +// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_linear_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByRef Tensor out); -// Parsed from ATen/ops/matrix_H.h +// Parsed from ATen/ops/mkldnn_linear_backward.h // #pragma once @@ -52184,14 +37926,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mkldnn_linear_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mkldnn_linear_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mkldnn_linear_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/matrix_exp.h +// Parsed from ATen/ops/mkldnn_linear_backward_input.h // #pragma once @@ -52212,16 +37961,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::matrix_exp(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor matrix_exp(@Const @ByRef Tensor self); +// aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_linear_backward_input(@ByVal LongArrayRef input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByVal Tensor mkldnn_linear_backward_input(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); + +// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_out(@ByRef Tensor out, @ByVal LongArrayRef input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); +@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); +// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_outf(@ByVal LongArrayRef input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByRef Tensor out); -// Parsed from ATen/ops/matrix_exp_backward.h +// Parsed from ATen/ops/mkldnn_linear_backward_weights.h // #pragma once @@ -52242,16 +37999,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor -@Namespace("at") public static native @ByVal Tensor matrix_exp_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad); +// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T mkldnn_linear_backward_weights(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Cast("bool") boolean bias_defined); + +// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T mkldnn_linear_backward_weights_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Cast("bool") boolean bias_defined); +// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T mkldnn_linear_backward_weights_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Cast("bool") boolean bias_defined, @ByRef Tensor out0, @ByRef Tensor out1);

-// Parsed from ATen/ops/matrix_power.h
+// Parsed from ATen/ops/mkldnn_max_pool2d.h

// #pragma once

@@ -52272,21 +38034,28 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
+// #include

-// aten::matrix_power(Tensor self, int n) -> Tensor
-@Namespace("at") public static native @ByVal Tensor matrix_power(@Const @ByRef Tensor self, @Cast("int64_t") long n);
+// aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/);
+@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size);
+@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/);
+@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size);

-// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor matrix_power_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long n);
-// aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor matrix_power_outf(@Const @ByRef Tensor self, @Cast("int64_t") long n, @ByRef Tensor out);
+// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
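As with the other pooling bindings in this file, the trailing @StdVector long... overload lets kernel_size be passed as plain Java varargs while stride, padding, and dilation keep their schema defaults ([], 0, 1). A sketch, again assuming a randn factory overload and the to_mkldnn()/to_dense() conversions:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class MkldnnPoolSketch {
    public static void main(String[] args) {
        Tensor x = randn(1, 8, 32, 32).to_mkldnn(); // NCHW input in MKL-DNN layout (assumed helpers)
        Tensor y = mkldnn_max_pool2d(x, 2, 2);      // kernel_size = {2, 2}; defaults elsewhere
        Tensor dense = y.to_dense();                // strided copy for inspection
    }
}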
+@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -// Parsed from ATen/ops/max.h +// Parsed from ATen/ops/mkldnn_max_pool2d_backward.h // #pragma once @@ -52307,49 +38076,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple max(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple max(@Const @ByRef Tensor self, @Cast("int64_t") long dim); - -// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_out(@ByRef Tensor max, @ByRef Tensor max_values, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_out(@ByRef Tensor max, @ByRef Tensor max_values, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) 
max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor max, @ByRef Tensor max_values); - -// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple max(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple max(@Const @ByRef Tensor self, @ByVal Dimname dim); - -// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_out(@ByRef Tensor max, @ByRef Tensor max_values, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_out(@ByRef Tensor max, @ByRef Tensor max_values, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor max, @ByRef Tensor max_values); - -// aten::max(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor max(@Const @ByRef Tensor self); +// #include -// aten::max.other(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor max(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor max_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor max_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor max_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor max_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -// Parsed from ATen/ops/max_pool1d.h +// Parsed from ATen/ops/mkldnn_max_pool3d.h // #pragma once @@ -52370,19 +38118,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +// aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); + +// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -// Parsed from ATen/ops/max_pool1d_with_indices.h +// Parsed from ATen/ops/mkldnn_max_pool3d_backward.h // #pragma once @@ -52403,19 +38160,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool1d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +// aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); + +// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -// Parsed from ATen/ops/max_pool2d.h +// Parsed from ATen/ops/mkldnn_reorder_conv2d_weight.h // #pragma once @@ -52436,19 +38202,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? 
input_size=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size); +@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); + +// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); +// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) 
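mkldnn_reorder_conv2d_weight pre-packs a convolution weight into the blocked layout oneDNN prefers, and because every geometry argument carries a default, the generated self-only overload above covers the common case. A sketch; the weight shape, the randn overload, and the need to convert via to_mkldnn() first are illustrative assumptions:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ReorderWeightSketch {
    public static void main(String[] args) {
        Tensor w = randn(16, 3, 3, 3);  // OIHW conv2d weight (assumed factory overload)
        // Defaults apply: padding=0, stride=1, dilation=1, groups=1.
        Tensor packed = mkldnn_reorder_conv2d_weight(w.to_mkldnn());
    }
}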
+@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal LongArrayRefOptional input_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByRef Tensor out); -// Parsed from ATen/ops/max_pool2d_backward.h +// Parsed from ATen/ops/mkldnn_reorder_conv3d_weight.h // #pragma once @@ -52469,28 +38242,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); +// aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -// Parsed from ATen/ops/max_pool2d_with_indices.h +// Parsed from ATen/ops/mkldnn_rnn_layer.h // #pragma once @@ -52511,28 +38282,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool2d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool2d_with_indices_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool2d_with_indices_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); +// aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal LongArrayRef batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); -// aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool2d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal LongArrayRef batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); +// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal LongArrayRef batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensor_T mkldnn_rnn_layer_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3); -// Parsed from ATen/ops/max_pool2d_with_indices_backward.h +// Parsed from ATen/ops/mkldnn_rnn_layer_backward.h // #pragma once @@ -52553,24 +38320,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -// aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
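A note on the overload pattern visible throughout this file: each int[] schema parameter is generated twice, once taking a LongArrayRef and once taking a plain long[] (widened to long... varargs when it is the trailing argument), so Java callers can pass ordinary arrays. A minimal sketch of the long[] form against the mkldnn_reorder_conv3d_weight binding added above, assuming an MKLDNN-enabled libtorch build and the presets' usual org.bytedeco.pytorch.global.torch entry point; the ReorderSketch name and the shapes are illustrative only:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ReorderSketch {
        public static void main(String[] args) {
            // 5-d conv3d weight: (out_channels, in_channels, kD, kH, kW)
            Tensor w = ones(8, 4, 3, 3, 3);
            try {
                // plain long[] overloads; LongArrayRef overloads with the same
                // defaults (padding=0, stride=1, dilation=1, groups=1) also exist
                Tensor r = mkldnn_reorder_conv3d_weight(
                        w.to_mkldnn(),         // the op requires an MKLDNN-layout tensor
                        new long[]{0, 0, 0},   // padding
                        new long[]{1, 1, 1},   // stride
                        new long[]{1, 1, 1},   // dilation
                        1);                    // groups
                System.out.println(r.dim());
            } catch (RuntimeException e) {
                // thrown when libtorch was built without MKLDNN support
                System.err.println("MKLDNN unavailable: " + e.getMessage());
            }
        }
    }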
-@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor max_pool2d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +// aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); -// aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor -@Namespace("at") public static native @ByVal Tensor max_pool2d_with_indices_backward(@Const @ByRef 
Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByVal Tensor max_pool2d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6, @Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6, @Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); +// 
aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal LongArrayRef batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6); +@Namespace("at") public static native @ByVal T_TensorTensorTensorTensorTensorTensorTensor_T mkldnn_rnn_layer_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6); -// Parsed from ATen/ops/max_pool3d.h +// Parsed from ATen/ops/mm.h // #pragma once @@ -52591,19 +38358,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef 
kernel_size); -@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::mm(Tensor self, Tensor mat2) -> Tensor +@Namespace("at") public static native @ByVal Tensor mm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat2); + +// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat2); +// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat2, @ByRef Tensor out); -// Parsed from ATen/ops/max_pool3d_with_indices.h +// Parsed from ATen/ops/mode.h // #pragma once @@ -52624,28 +38393,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool3d_with_indices_out(@ByRef Tensor out, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); -// aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool3d_with_indices_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer max_pool3d_with_indices_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out, @ByRef Tensor indices); +// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T mode(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T mode(@Const @ByRef Tensor self); -// aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple max_pool3d_with_indices(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T mode_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T mode_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self); +// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T mode_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); + +// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T mode(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T mode(@Const @ByRef Tensor self, @ByVal Dimname dim); + +// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T mode_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T mode_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T mode_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); -// Parsed from ATen/ops/max_pool3d_with_indices_backward.h +// Parsed from ATen/ops/moveaxis.h // #pragma once @@ -52666,24 +38440,20 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) 
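Unlike the removed declarations above, which returned multi-output results through an opaque PointerPointer cast to std::tuple, the regenerated bindings return typed tuple carriers such as T_TensorTensor_T. A minimal sketch of unpacking mode's (values, indices) pair, assuming the carriers expose get0()/get1() accessors as elsewhere in these presets; ModeSketch is illustrative only:

    import org.bytedeco.pytorch.T_TensorTensor_T;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ModeSketch {
        public static void main(String[] args) {
            Tensor x = ones(3, 4);
            // the overload without dim/keepdim uses the schema defaults (dim=-1, keepdim=False)
            T_TensorTensor_T vi = mode(x);
            Tensor values = vi.get0();   // modal value along the last dim
            Tensor indices = vi.get1();  // index of an occurrence of that value
            System.out.println(values.size(0) + " values, " + indices.size(0) + " indices");
        }
    }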
-@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -// aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor max_pool3d_with_indices_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices, @ByRef Tensor grad_input); +// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor moveaxis(@Const @ByRef Tensor self, @ByVal LongArrayRef source, @ByVal LongArrayRef destination); +@Namespace("at") public static native @ByVal Tensor moveaxis(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
destination); -// aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor -@Namespace("at") public static native @ByVal Tensor max_pool3d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); -@Namespace("at") public static native @ByVal Tensor max_pool3d_with_indices_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @Const @ByRef Tensor indices); +// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor moveaxis(@Const @ByRef Tensor self, @Cast("int64_t") long source, @Cast("int64_t") long destination); -// Parsed from ATen/ops/max_unpool2d.h +// Parsed from ATen/ops/movedim.h // #pragma once @@ -52704,24 +38474,20 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor max_unpool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor max_unpool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor max_unpool2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor max_unpool2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); +// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor movedim(@Const @ByRef Tensor self, @ByVal LongArrayRef source, @ByVal LongArrayRef destination); +@Namespace("at") public static native @ByVal Tensor movedim(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
destination); -// aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor -@Namespace("at") public static native @ByVal Tensor max_unpool2d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor max_unpool2d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor movedim(@Const @ByRef Tensor self, @Cast("int64_t") long source, @Cast("int64_t") long destination); -// Parsed from ATen/ops/max_unpool3d.h +// Parsed from ATen/ops/mps_convolution_backward.h // #pragma once @@ -52742,24 +38508,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor max_unpool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor max_unpool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) 
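moveaxis and movedim are aliases at the ATen level, and each gets the same pair of Java overloads here: a scalar (int source, int destination) form and an intlist form whose trailing destination parameter widens to long... varargs. A minimal sketch, assuming the usual global torch entry point; MoveAxisSketch is illustrative only:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class MoveAxisSketch {
        public static void main(String[] args) {
            Tensor x = zeros(2, 3, 4);
            // scalar overload: axis 0 moves to position 2, giving shape (3, 4, 2)
            Tensor a = moveaxis(x, 0, 2);
            // intlist overload: long[] source plus trailing long... destination
            Tensor b = movedim(x, new long[]{0, 1}, 2, 0);  // also shape (3, 4, 2)
            System.out.println(a.size(0) + " " + b.size(0));
        }
    }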
-@Namespace("at") public static native @ByRef Tensor max_unpool3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor max_unpool3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor max_unpool3d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor max_unpool3d(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T mps_convolution_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/maximum.h +// Parsed from ATen/ops/mps_convolution_transpose_backward.h // #pragma once @@ -52780,21 +38546,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::maximum(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor maximum(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor maximum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor maximum_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef stride, @ByVal LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); +@Namespace("at") public static native @ByVal T_TensorTensor_T mps_convolution_transpose_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); -// Parsed from ATen/ops/mean.h +// Parsed from ATen/ops/mse_loss.h // #pragma once @@ -52815,42 +38584,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self); - -// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); -@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// #include -// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
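The bool[3] and bool[2] output_mask parameters in the two MPS backward schemas above map to a JavaCPP BoolPointer cast to std::array<bool,N>*, with one flag per gradient to compute. A minimal sketch of marshalling the mask, assuming an MPS-enabled (macOS) libtorch build for the actual call to succeed; the shapes and the MpsBackwardSketch name are illustrative only:

    import org.bytedeco.javacpp.BoolPointer;
    import org.bytedeco.pytorch.T_TensorTensorTensor_T;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class MpsBackwardSketch {
        public static void main(String[] args) {
            // flags: compute grad_input, grad_weight, grad_bias
            BoolPointer mask = new BoolPointer(3);
            mask.put(0, true).put(1, true).put(2, false);

            Tensor self = ones(1, 4, 8, 8);    // NCHW input
            Tensor weight = ones(8, 4, 3, 3);  // conv weight
            Tensor grad = ones(1, 8, 8, 8);    // grad_output for padding=1, stride=1
            try {
                T_TensorTensorTensor_T grads = mps_convolution_backward(
                        self, grad, weight,
                        new long[]{1, 1}, new long[]{1, 1}, new long[]{1, 1},
                        1, mask);
                System.out.println(grads.get0().dim());
            } catch (RuntimeException e) {
                // expected anywhere the MPS backend is unavailable
                System.err.println("MPS unavailable: " + e.getMessage());
            }
        }
    }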
-@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); -@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mse_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByRef Tensor mse_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mse_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor out); -// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor mean_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor mse_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor mse_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/median.h +// Parsed from ATen/ops/mse_loss_backward.h // #pragma once @@ -52871,41 +38621,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::median(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor median(@Const @ByRef Tensor self); - -// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple median(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple median(@Const @ByRef Tensor self, @Cast("int64_t") long dim); - -// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer median_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer median_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer median_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); +// #include -// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple median(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple median(@Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer median_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer median_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer median_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); +// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mse_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); +// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mse_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor grad_input); -// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor median_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor median_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor +@Namespace("at") public static native @ByVal Tensor mse_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); -// Parsed from ATen/ops/meshgrid.h +// Parsed from ATen/ops/msort.h // #pragma once @@ -52926,19 +38656,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::meshgrid(Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorArrayRef tensors); +// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor msort_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor msort_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector meshgrid(@ByVal TensorArrayRef tensors, @ByVal @Cast("c10::string_view*") Pointer indexing); +// aten::msort(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor msort(@Const @ByRef Tensor self); -// Parsed from ATen/ops/min.h +// Parsed from ATen/ops/mul.h // #pragma once @@ -52959,44 +38691,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple min(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple min(@Const @ByRef Tensor self, @Cast("int64_t") long dim); - -// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) 
indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer min_out(@ByRef Tensor min, @ByRef Tensor min_indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer min_out(@ByRef Tensor min, @ByRef Tensor min_indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer min_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor min, @ByRef Tensor min_indices); +// #include -// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple min(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple min(@Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer min_out(@ByRef Tensor min, @ByRef Tensor min_indices, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer min_out(@ByRef Tensor min, @ByRef Tensor min_indices, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer min_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor min, @ByRef Tensor min_indices); +// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor mul(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::min(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor min(@Const @ByRef Tensor self); +// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mul_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor min_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
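A hedged sketch of the mul bindings added above; the out-variant writes into a caller-supplied tensor and returns it by reference:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

class MulSketch {
    static Tensor product(Tensor a, Tensor b, Tensor out) {
        Tensor prod = mul(a, b);   // aten::mul.Tensor, broadcasts as usual
        mul_outf(a, b, out);       // aten::mul.out, result lands in `out`
        return prod;
    }
}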
-@Namespace("at") public static native @ByRef Tensor min_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor mul(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::min.other(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor min(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mul_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// Parsed from ATen/ops/minimum.h +// Parsed from ATen/ops/multi_margin_loss.h // #pragma once @@ -53017,21 +38734,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::minimum(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor minimum(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multi_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar p, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar margin, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByRef Tensor multi_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multi_margin_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByRef Tensor out); -// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor minimum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor minimum_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? 
weight=None, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor multi_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar p, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar margin, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor multi_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/miopen_batch_norm.h +// Parsed from ATen/ops/multi_margin_loss_backward.h // #pragma once @@ -53052,21 +38771,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple miopen_batch_norm(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon); +// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multi_margin_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByRef Tensor multi_margin_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin); +// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multi_margin_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByRef Tensor grad_input); -// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer miopen_batch_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon); -// aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) 
out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer miopen_batch_norm_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double exponential_average_factor, double epsilon, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor multi_margin_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor multi_margin_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin); -// Parsed from ATen/ops/miopen_batch_norm_backward.h +// Parsed from ATen/ops/multilabel_margin_loss.h // #pragma once @@ -53087,21 +38808,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple miopen_batch_norm_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon); +// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multilabel_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByRef Tensor multilabel_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multilabel_margin_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor out); -// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer miopen_batch_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon); -// aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer miopen_batch_norm_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_var, double epsilon, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor multilabel_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor multilabel_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/miopen_convolution.h +// Parsed from ATen/ops/multilabel_margin_loss_backward.h // #pragma once @@ -53122,43 +38845,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); - - -// aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); - - -// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); - - -// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); - - -// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// #include -// aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multilabel_margin_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @Const @ByRef Tensor is_target); +// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) 
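For the loss families above, a minimal usage sketch of the functional multilabel_margin_loss binding (its backward out-variant continues directly below); per the aten schema, target is an int64 tensor of class indices:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

class MultilabelMarginLossSketch {
    static Tensor loss(Tensor input, Tensor target) {
        // short overload: reduction defaults to at::Reduction::Mean
        return multilabel_margin_loss(input, target);
    }
}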
+@Namespace("at") public static native @ByRef Tensor multilabel_margin_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @Const @ByRef Tensor is_target, @ByRef Tensor grad_input); +// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor +@Namespace("at") public static native @ByVal Tensor multilabel_margin_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @Const @ByRef Tensor is_target); -// Parsed from ATen/ops/miopen_convolution_add_relu.h +// Parsed from ATen/ops/multilabel_margin_loss_forward.h // #pragma once @@ -53179,17 +38880,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_add_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef Tensor z, @Const @ByRef ScalarOptional alpha, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T multilabel_margin_loss_forward_out(@ByRef Tensor output, @ByRef Tensor is_target, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); +// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T multilabel_margin_loss_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor output, @ByRef Tensor is_target); + +// aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target) +@Namespace("at") public static native @ByVal T_TensorTensor_T multilabel_margin_loss_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); -// Parsed from ATen/ops/miopen_convolution_relu.h +// Parsed from ATen/ops/multinomial.h // #pragma once @@ -53210,17 +38915,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_relu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multinomial_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long num_samples, @Cast("bool") boolean replacement/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor multinomial_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long num_samples); +// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multinomial_outf(@Const @ByRef Tensor self, @Cast("int64_t") long num_samples, @Cast("bool") boolean replacement, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor multinomial(@Const @ByRef Tensor self, @Cast("int64_t") long num_samples, @Cast("bool") boolean replacement/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor multinomial(@Const @ByRef Tensor self, @Cast("int64_t") long num_samples); -// Parsed from ATen/ops/miopen_convolution_transpose.h + +// Parsed from ATen/ops/multiply.h // #pragma once @@ -53241,43 +38952,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); - - -// aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal SymIntRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_convolution_transpose_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal SymIntRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); - - -// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
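A short sketch of the multinomial binding added earlier in this hunk; on the abbreviated overload, replacement defaults to false:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

class MultinomialSketch {
    static Tensor sample(Tensor weights) {
        // draws 3 indices without replacement; weights are nonnegative
        // and need not sum to 1
        return multinomial(weights, 3);
    }
}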
-@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); - - -// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); - +// #include -// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal SymIntRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal SymIntRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor multiply(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal SymIntRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_convolution_transpose_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal SymIntRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multiply_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor multiply_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor multiply(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// Parsed from ATen/ops/miopen_depthwise_convolution.h +// Parsed from ATen/ops/mv.h // #pragma once @@ -53298,43 +38990,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); - - -// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor -@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByVal Tensor miopen_depthwise_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); - - -// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); - - -// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); - - -// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic); +// #include -// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
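A sketch of the multiply bindings added earlier in this hunk (aten::multiply is an alias of aten::mul); the Scalar(double) constructor is assumed here:

import org.bytedeco.pytorch.Scalar;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

class MultiplySketch {
    static Tensor scale(Tensor a) {
        return multiply(a, new Scalar(2.0));   // aten::multiply.Scalar; Scalar(double) assumed
    }
}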
-@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor miopen_depthwise_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @Cast("bool") boolean benchmark, @Cast("bool") boolean deterministic, @ByRef Tensor out); +// aten::mv(Tensor self, Tensor vec) -> Tensor +@Namespace("at") public static native @ByVal Tensor mv(@Const @ByRef Tensor self, @Const @ByRef Tensor vec); +// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor vec); +// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mv_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor vec, @ByRef Tensor out); -// Parsed from ATen/ops/miopen_rnn.h +// Parsed from ATen/ops/mvlgamma.h // #pragma once @@ -53355,24 +39025,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTuple miopen_rnn(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state); -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTuple miopen_rnn(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state); +// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor mvlgamma_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long p); +// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor mvlgamma_outf(@Const @ByRef Tensor self, @Cast("int64_t") long p, @ByRef Tensor out); -// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer miopen_rnn_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer miopen_rnn_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state); -// aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) 
out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer miopen_rnn_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer miopen_rnn_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4); +// aten::mvlgamma(Tensor self, int p) -> Tensor +@Namespace("at") public static native @ByVal Tensor mvlgamma(@Const @ByRef Tensor self, @Cast("int64_t") long p); -// Parsed from ATen/ops/miopen_rnn_backward.h +// Parsed from ATen/ops/nan_to_num.h // #pragma once @@ -53393,24 +39060,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorVectorTuple miopen_rnn_backward(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal TensorTensorTensorTensorVectorTuple miopen_rnn_backward(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor nan_to_num(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional nan, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional posinf, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional neginf); +@Namespace("at") public static native @ByVal Tensor nan_to_num(@Const @ByRef Tensor self); -// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2, Tensor(d!)[] out3) -> () -@Namespace("at") public static native void miopen_rnn_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native void miopen_rnn_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3, @Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2, Tensor(d!)[] out3) -> ()
-@Namespace("at") public static native void miopen_rnn_backward_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array<bool,4>*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3);
-@Namespace("at") public static native void miopen_rnn_backward_outf(@Const @ByRef Tensor input, @ByVal TensorArrayRef weight, @Cast("int64_t") long weight_stride0, @Const @ByRef Tensor weight_buf, @Const @ByRef Tensor hx, @Const @ByRef TensorOptional cx, @Const @ByRef Tensor output, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean batch_first, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] batch_sizes, @Const @ByRef TensorOptional dropout_state, @Const @ByRef Tensor reserve, @ByVal @Cast("std::array<bool,4>*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByVal TensorArrayRef out3);
+// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor nan_to_num_(@ByRef Tensor self, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional nan, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional posinf, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional neginf);
+@Namespace("at") public static native @ByRef Tensor nan_to_num_(@ByRef Tensor self);
+
+// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor nan_to_num_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional nan, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional posinf, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional neginf);
+@Namespace("at") public static native @ByRef Tensor nan_to_num_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor nan_to_num_outf(@Const @ByRef Tensor self, @ByVal DoubleOptional nan, @ByVal DoubleOptional posinf, @ByVal DoubleOptional neginf, @ByRef Tensor out); -// Parsed from ATen/ops/mish.h +// Parsed from ATen/ops/nanmean.h // #pragma once @@ -53431,24 +39101,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::mish(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor mish(@Const @ByRef Tensor self); -// aten::mish_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mish_(@ByRef Tensor self); +// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor nanmean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor nanmean(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor nanmean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mish_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mish_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nanmean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor nanmean_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor nanmean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor nanmean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor nanmean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/mish_backward.h +// Parsed from ATen/ops/nanmedian.h // #pragma once @@ -53469,16 +39141,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor mish_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::nanmedian(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor nanmedian(@Const @ByRef Tensor self); + +// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T nanmedian(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T nanmedian(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T nanmedian_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T nanmedian_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T nanmedian_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); + +// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T nanmedian(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T nanmedian(@Const @ByRef Tensor self, @ByVal Dimname dim); + +// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T nanmedian_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T nanmedian_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T nanmedian_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); + +// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nanmedian_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nanmedian_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/mkldnn_adaptive_avg_pool2d.h + +// Parsed from ATen/ops/nanquantile.h // #pragma once @@ -53499,24 +39196,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_adaptive_avg_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q); -// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); +// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q); +// aten::nanquantile.out(Tensor self, Tensor q, int? 
dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nanquantile_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); + +// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, double q); +// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q); +// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nanquantile_outf(@Const @ByRef Tensor self, double q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); -// Parsed from ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h + +// Parsed from ATen/ops/nansum.h // #pragma once @@ -53537,21 +39243,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_adaptive_avg_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor nansum(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor nansum(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor nansum(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_adaptive_avg_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor out); +// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nansum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor nansum_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor nansum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nansum_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor nansum_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/mkldnn_convolution.h +// Parsed from ATen/ops/narrow.h // #pragma once @@ -53572,43 +39283,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor mkldnn_convolution(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); - - -// aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByVal Tensor mkldnn_convolution_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +// #include -// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor narrow(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long length); -// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor narrow_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt length); -// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups); -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups); +// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor narrow(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor start, @Cast("int64_t") long length); -// aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_convolution_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); +// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor narrow_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor start, @ByVal SymInt length); -// Parsed from ATen/ops/mkldnn_linear.h +// Parsed from ATen/ops/narrow_copy.h // #pragma once @@ -53629,23 +39326,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_linear(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias); -@Namespace("at") public static native @ByVal Tensor mkldnn_linear(@Const @ByRef Tensor self, @Const @ByRef Tensor weight); +// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor +@Namespace("at") public static native @ByVal Tensor narrow_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long length); + + +// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor +@Namespace("at") public static native @ByVal Tensor narrow_copy_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt length); -// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor mkldnn_linear_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias); -@Namespace("at") public static native @ByRef Tensor mkldnn_linear_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight); -// aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_linear_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @Const @ByRef TensorOptional bias, @ByRef Tensor out); + +// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor narrow_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long length); + + +// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor narrow_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long length, @ByRef Tensor out); + + +// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor narrow_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt length); + + +// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor narrow_copy_symint_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt length, @ByRef Tensor out); -// Parsed from ATen/ops/mkldnn_linear_backward.h + +// Parsed from ATen/ops/native_batch_norm.h // #pragma once @@ -53666,21 +39377,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple mkldnn_linear_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_batch_norm(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double momentum, double eps); -// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_linear_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_linear_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_batch_norm_out(@ByRef Tensor out, @ByRef Tensor save_mean, @ByRef Tensor save_invstd, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double momentum, double eps); +// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_batch_norm_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double momentum, double eps, @ByRef Tensor out, @ByRef Tensor save_mean, @ByRef Tensor save_invstd); -// Parsed from ATen/ops/mkldnn_linear_backward_input.h +// Parsed from ATen/ops/native_batch_norm_backward.h // #pragma once @@ -53701,24 +39412,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_linear_backward_input(@ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); -@Namespace("at") public static native @ByVal Tensor mkldnn_linear_backward_input(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight); +// aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_batch_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_invstd, @Cast("bool") boolean train, double eps, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
-// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight);
-@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight);
-// aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_outf(@ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor mkldnn_linear_backward_input_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByRef Tensor out);
+// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_batch_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_invstd, @Cast("bool") boolean train, double eps, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask);
+// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!)
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_batch_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_invstd, @Cast("bool") boolean train, double eps, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/mkldnn_linear_backward_weights.h +// Parsed from ATen/ops/native_channel_shuffle.h // #pragma once @@ -53739,21 +39447,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple mkldnn_linear_backward_weights(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Cast("bool") boolean bias_defined); -// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_linear_backward_weights_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Cast("bool") boolean bias_defined); -// aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_linear_backward_weights_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor input, @Const @ByRef Tensor weight, @Cast("bool") boolean bias_defined, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::native_channel_shuffle(Tensor self, int groups) -> Tensor +@Namespace("at") public static native @ByVal Tensor native_channel_shuffle(@Const @ByRef Tensor self, @Cast("int64_t") long groups); -// Parsed from ATen/ops/mkldnn_max_pool2d.h +// Parsed from ATen/ops/native_dropout.h // #pragma once @@ -53774,28 +39477,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = 
"at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T native_dropout(@Const @ByRef Tensor input, double p, @ByVal BoolOptional train); -// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T native_dropout_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, double p, @ByVal BoolOptional train); +// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T native_dropout_outf(@Const @ByRef Tensor input, double p, @ByVal BoolOptional train, @ByRef Tensor out0, @ByRef Tensor out1); -// Parsed from ATen/ops/mkldnn_max_pool2d_backward.h +// Parsed from ATen/ops/native_dropout_backward.h // #pragma once @@ -53816,28 +39512,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor +@Namespace("at") public static native @ByVal Tensor native_dropout_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor mask, double scale); -// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
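native_dropout above returns the typed T_TensorTensor_T wrapper rather than a raw tuple pointer; a small sketch, where the get0()/get1() accessors and the BoolOptional value constructor are assumptions about the generated classes:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class DropoutSketch {
        public static void main(String[] args) {
            Tensor x = randn(8);                       // randn(long...) assumed
            T_TensorTensor_T r = native_dropout(x, 0.5, new BoolOptional(true));
            Tensor dropped = r.get0();                 // rescaled activations
            Tensor mask = r.get1();                    // keep/drop mask
        }
    }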
-@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor native_dropout_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor mask, double scale); +// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor native_dropout_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor mask, double scale, @ByRef Tensor out); -// Parsed from ATen/ops/mkldnn_max_pool3d.h +// Parsed from ATen/ops/native_group_norm.h // #pragma once @@ -53858,28 +39547,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, double eps); -// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); + +// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_symint(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, double eps); +// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, double eps); -// Parsed from ATen/ops/mkldnn_max_pool3d_backward.h +// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); + + +// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, double eps); + + +// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_symint_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); + + + + + +// Parsed from ATen/ops/native_group_norm_backward.h // #pragma once @@ -53900,28 +39598,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include <ATen/ops/mkldnn_max_pool3d_backward_ops.h> +// #include <ATen/ops/native_group_norm_backward_ops.h> -// aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_max_pool3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size); +// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask); -// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
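// --- Usage sketch (editorial, not part of this patch): calling the new
// native_group_norm binding declared above via org.bytedeco.pytorch.global.torch.
// Assumes the T_TensorTensorTensor_T wrapper introduced by this PR exposes
// get0()/get1()/get2() accessors, that TensorOptional has an empty constructor
// for a null weight/bias, and that Tensor.print() is available for inspection.
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class GroupNormSketch {
    public static void main(String[] args) {
        Tensor input = randn(2, 6, 4, 4);               // N=2, C=6, HxW=4*4=16
        // aten::native_group_norm returns (out, mean, rstd)
        T_TensorTensorTensor_T r = native_group_norm(
                input, new TensorOptional(), new TensorOptional(),
                2, 6, 16, 3, 1e-5);                     // 3 groups, eps=1e-5
        r.get0().print();                               // normalized output
    }
}
// --- end sketch ---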
-@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_max_pool3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); + +// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? 
weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_backward_symint(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask); +// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask); -// Parsed from ATen/ops/mkldnn_reorder_conv2d_weight.h +// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); + + +// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array<bool,3>*") BoolPointer output_mask); + + +// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!)
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_group_norm_backward_symint_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); + + + + + +// Parsed from ATen/ops/native_layer_norm.h // #pragma once @@ -53942,26 +39649,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size); -@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv2d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); +// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm(@Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); -// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) 
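// --- Usage sketch (editorial, not part of this patch): the bool[3] output_mask
// parameter of native_group_norm_backward maps to a 3-element javacpp BoolPointer
// through the @Cast("std::array<bool,3>*") shown above. The empty TensorOptional
// weight and the T_TensorTensorTensor_T accessors are assumptions; shapes and
// names are illustrative only.
import org.bytedeco.javacpp.BoolPointer;
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class GroupNormBackwardSketch {
    public static void main(String[] args) {
        Tensor x = randn(2, 6, 4, 4);
        T_TensorTensorTensor_T fwd = native_group_norm(
                x, new TensorOptional(), new TensorOptional(), 2, 6, 16, 3, 1e-5);
        BoolPointer mask = new BoolPointer(3);          // bool[3] output_mask
        mask.put(0, true);                              // request grad_input only
        mask.put(1, false);
        mask.put(2, false);
        T_TensorTensorTensor_T grads = native_group_norm_backward(
                ones(2, 6, 4, 4), x, fwd.get1(), fwd.get2(),
                new TensorOptional(), 2, 6, 16, 3, mask);
        grads.get0().print();                           // d(out)/d(input)
    }
}
// --- end sketch ---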
-@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional input_size); -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); -// aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, int[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal LongArrayRefOptional input_size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv2d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByRef Tensor out); + +// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); +// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); -// Parsed from ATen/ops/mkldnn_reorder_conv3d_weight.h +// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_outf(@Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_outf(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); + + +// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal SymIntArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); + + +// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_symint_outf(@Const @ByRef Tensor input, @ByVal SymIntArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); + + + + + +// Parsed from ATen/ops/native_layer_norm_backward.h // #pragma once @@ -53982,26 +39703,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor mkldnn_reorder_conv3d_weight(@Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); +// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) 
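// --- Usage sketch (editorial, not part of this patch): the native_layer_norm
// bindings just listed. The long[] overload carries the SymInt[] normalized_shape;
// the _out form takes the destination tensors first, the _outf form takes them
// last. Assumes TensorOptional wraps a Tensor via its constructor and that the
// out kernels resize zero-sized destinations created with empty(...).
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class LayerNormSketch {
    public static void main(String[] args) {
        Tensor x = randn(4, 10);
        // value form: returns (out, mean, rstd)
        T_TensorTensorTensor_T r = native_layer_norm(
                x, new long[]{10}, new TensorOptional(ones(10)),
                new TensorOptional(), 1e-5);
        // out form: same computation written into preallocated tensors
        Tensor out0 = empty(0), out1 = empty(0), out2 = empty(0);
        native_layer_norm_out(out0, out1, out2, x, new long[]{10},
                new TensorOptional(), new TensorOptional(), 1e-5);
        r.get0().print();
    }
}
// --- end sketch ---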
-@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups/*=1*/); -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups/*=1*/); -// aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor mkldnn_reorder_conv3d_weight_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByRef Tensor out); + +// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_symint(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal SymIntArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); + + +// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal LongArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); + + +// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal SymIntArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask); + + +// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T native_layer_norm_backward_symint_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal SymIntArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/mkldnn_rnn_layer.h + + + +// Parsed from ATen/ops/native_norm.h // #pragma once @@ -54022,24 +39757,34 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_rnn_layer(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) -> (Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple mkldnn_rnn_layer(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTuple mkldnn_rnn_layer(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); +// aten::native_norm(Tensor self, Scalar p=2) -> Tensor +@Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); +@Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self); -// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_rnn_layer_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_rnn_layer_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train); -// aten::mkldnn_rnn_layer.out(Tensor input, Tensor weight0, Tensor weight1, Tensor weight2, Tensor weight3, Tensor hx_, Tensor cx_, bool reverse, int[] batch_sizes, int mode, int hidden_size, int num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_rnn_layer_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_rnn_layer_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight0, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_, @Cast("bool") boolean reverse, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first, @Cast("bool") boolean train, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3); +// aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? 
dtype) -> Tensor +@Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); + +// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); +@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor native_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar p, @ByRef Tensor out); + +// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype); +// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor native_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor native_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/mkldnn_rnn_layer_backward.h +// Parsed from ATen/ops/ne.h // #pragma once @@ -54060,24 +39805,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mkldnn_rnn_layer_backward(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? 
grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTensorTensorTuple mkldnn_rnn_layer_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); -@Namespace("at") public static native @ByVal TensorTensorTensorTensorTensorTensorTensorTuple mkldnn_rnn_layer_backward(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); +// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ne_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ne_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) 
out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_rnn_layer_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6, @Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_rnn_layer_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6, @Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace); -// aten::mkldnn_rnn_layer_backward.out(Tensor input, Tensor weight1, Tensor weight2, Tensor weight3, Tensor weight4, Tensor hx_, Tensor cx_tmp, Tensor output, Tensor hy_, Tensor cy_, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, bool reverse, int mode, int hidden_size, int num_layers, bool has_biases, bool train, bool bidirectional, int[] batch_sizes, bool batch_first, Tensor workspace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5, Tensor(g!) 
out6) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!), Tensor(g!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_rnn_layer_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast("c10::ArrayRef*") LongArrayRef batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mkldnn_rnn_layer_backward_outf(@Const @ByRef Tensor input, @Const @ByRef Tensor weight1, @Const @ByRef Tensor weight2, @Const @ByRef Tensor weight3, @Const @ByRef Tensor weight4, @Const @ByRef Tensor hx_, @Const @ByRef Tensor cx_tmp, @Const @ByRef Tensor output, @Const @ByRef Tensor hy_, @Const @ByRef Tensor cy_, @Const @ByRef TensorOptional grad_output, @Const @ByRef TensorOptional grad_hy, @Const @ByRef TensorOptional grad_cy, @Cast("bool") boolean reverse, @Cast("int64_t") long mode, @Cast("int64_t") long hidden_size, @Cast("int64_t") long num_layers, @Cast("bool") boolean has_biases, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] batch_sizes, @Cast("bool") boolean batch_first, @Const @ByRef Tensor workspace, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @ByRef Tensor out3, @ByRef Tensor out4, @ByRef Tensor out5, @ByRef Tensor out6); +// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor ne(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ne_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ne_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor ne(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/mm.h +// Parsed from ATen/ops/neg.h // #pragma once @@ -54098,21 +39848,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mm(Tensor self, Tensor mat2) -> Tensor -@Namespace("at") public static native @ByVal Tensor mm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat2); +// aten::neg(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor neg(@Const @ByRef Tensor self); -// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) 
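// --- Usage sketch (editorial, not part of this patch): ne has Scalar and Tensor
// overloads, each with _out/_outf companions, as declared above. Assumes the
// generated Scalar class exposes a double constructor for at::Scalar and that
// the comparison out kernel resizes (and casts into) the destination tensor.
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class NeSketch {
    public static void main(String[] args) {
        Tensor a = randn(2, 2);
        Tensor m1 = ne(a, new Scalar(0.0));   // elementwise a != 0
        Tensor m2 = ne(a, zeros(2, 2));       // elementwise tensor comparison
        Tensor out = empty(0);
        ne_out(out, a, new Scalar(0.0));      // writes the mask into out
        m1.print();
    }
}
// --- end sketch ---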
-@Namespace("at") public static native @ByRef Tensor mm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat2); -// aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat2, @ByRef Tensor out); +// aten::neg_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor neg_(@ByRef Tensor self); + +// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor neg_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor neg_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/mode.h +// Parsed from ATen/ops/negative.h // #pragma once @@ -54133,33 +39886,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple mode(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple mode(@Const @ByRef Tensor self); -// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mode_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mode_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self); -// aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mode_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); +// aten::negative(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor negative(@Const @ByRef Tensor self); -// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple mode(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple mode(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::negative_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor negative_(@ByRef Tensor self); -// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mode_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mode_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mode_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); +// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor negative_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor negative_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/moveaxis.h +// Parsed from ATen/ops/nested_to_padded_tensor.h // #pragma once @@ -54180,20 +39924,18 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor moveaxis(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef source, @ByVal @Cast("c10::ArrayRef*") LongArrayRef destination); -@Namespace("at") public static native @ByVal Tensor moveaxis(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... destination); -// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor moveaxis(@Const @ByRef Tensor self, @Cast("int64_t") long source, @Cast("int64_t") long destination); +// aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor nested_to_padded_tensor(@Const @ByRef Tensor self, double padding, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional output_size); +@Namespace("at") public static native @ByVal Tensor nested_to_padded_tensor(@Const @ByRef Tensor self, double padding); +@Namespace("at") public static native @ByVal Tensor nested_to_padded_tensor(@Const @ByRef Tensor self, double padding, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); -// Parsed from ATen/ops/movedim.h +// Parsed from ATen/ops/new_empty.h // #pragma once @@ -54214,20 +39956,34 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include <ATen/ops/movedim_ops.h> +// #include <ATen/ops/new_empty_ops.h> -// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor movedim(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef source, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef destination); -@Namespace("at") public static native @ByVal Tensor movedim(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] source, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... destination); -// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor movedim(@Const @ByRef Tensor self, @Cast("int64_t") long source, @Cast("int64_t") long destination); + + +// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_empty_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor new_empty_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... size); + + +// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_empty_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor new_empty_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] size, @ByRef Tensor out); + + +// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_empty_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size); + + +// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
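// --- Usage sketch (editorial, not part of this patch): the new_empty.out family
// listed above (the symint variants continue just below). The size argument can
// be passed as long varargs via the @StdVector overload; self only supplies the
// dtype/device. Assumes the out kernel resizes a zero-sized destination.
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class NewEmptyOutSketch {
    public static void main(String[] args) {
        Tensor self = randn(1);           // dtype/device template for new_empty
        Tensor out = empty(0);
        new_empty_out(out, self, 2, 3);   // long... overload: size = [2, 3]
        out.print();                      // uninitialized 2x3 tensor
    }
}
// --- end sketch ---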
+@Namespace("at") public static native @ByRef Tensor new_empty_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByRef Tensor out); -// Parsed from ATen/ops/mps_convolution_backward.h + +// Parsed from ATen/ops/new_empty_strided.h // #pragma once @@ -54248,24 +40004,34 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + -// aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple mps_convolution_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple mps_convolution_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mps_convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mps_convolution_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mps_convolution_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mps_convolution_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_empty_strided_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride); +@Namespace("at") public static native @ByRef Tensor new_empty_strided_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); + + +// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_empty_strided_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor new_empty_strided_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_empty_strided_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride); -// Parsed from ATen/ops/mps_convolution_transpose_backward.h + +// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor new_empty_strided_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/new_full.h // #pragma once @@ -54286,24 +40052,34 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple mps_convolution_transpose_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal TensorTensorTuple mps_convolution_transpose_backward(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mps_convolution_transpose_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mps_convolution_transpose_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mps_convolution_transpose_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer mps_convolution_transpose_backward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("int64_t") long groups, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_full_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @Const @ByRef Scalar fill_value); +@Namespace("at") public static native @ByRef Tensor new_full_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); -// Parsed from ATen/ops/mse_loss.h +// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_full_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor new_full_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); + + +// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_full_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value); + + +// aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_full_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @Const @ByRef Scalar fill_value, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/new_ones.h // #pragma once @@ -54324,23 +40100,34 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor mse_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByRef Tensor mse_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mse_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor out); -// aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor mse_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor mse_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_ones_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor new_ones_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// Parsed from ATen/ops/mse_loss_backward.h +// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_ones_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor new_ones_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); + + +// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_ones_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size); + + +// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_ones_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/new_zeros.h // #pragma once @@ -54361,21 +40148,34 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mse_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); -// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor mse_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor grad_input); -// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor -@Namespace("at") public static native @ByVal Tensor mse_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); + + +// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_zeros_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor new_zeros_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + + +// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_zeros_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor new_zeros_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); + + +// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_zeros_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size); + + +// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor new_zeros_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByRef Tensor out); -// Parsed from ATen/ops/msort.h + +// Parsed from ATen/ops/nextafter.h // #pragma once @@ -54396,21 +40196,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor msort_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor msort_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nextafter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nextafter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::msort(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor msort(@Const @ByRef Tensor self); +// aten::nextafter(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor nextafter(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/mul.h +// Parsed from ATen/ops/nll_loss.h // #pragma once @@ -54431,29 +40231,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor mul(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::nll_loss.out(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/); +@Namespace("at") public static native @ByRef Tensor nll_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mul_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor mul(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @ByRef Tensor out); -// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mul_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mul_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); + +// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index); +@Namespace("at") public static native @ByRef Tensor nll_loss_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); + + +// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @ByRef Tensor out); + + +// aten::nll_loss(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor +@Namespace("at") public static native @ByVal Tensor nll_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/); +@Namespace("at") public static native @ByVal Tensor nll_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); + + +// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor +@Namespace("at") public static native @ByVal Tensor nll_loss_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index); +@Namespace("at") public static native @ByVal Tensor nll_loss_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/multi_margin_loss.h + +// Parsed from ATen/ops/nll_loss2d.h // #pragma once @@ -54474,23 +40286,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor multi_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar p, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar margin, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByRef Tensor multi_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor multi_margin_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByRef Tensor out); +// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/); +@Namespace("at") public static native @ByRef Tensor nll_loss2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); + + +// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @ByRef Tensor out); + + +// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor nll_loss2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index); +@Namespace("at") public static native @ByRef Tensor nll_loss2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); + + +// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @ByRef Tensor out); + + +// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor +@Namespace("at") public static native @ByVal Tensor nll_loss2d(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/); +@Namespace("at") public static native @ByVal Tensor nll_loss2d(@Const @ByRef Tensor self, @Const @ByRef Tensor target); + + +// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor +@Namespace("at") public static native @ByVal Tensor nll_loss2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index); +@Namespace("at") public static native @ByVal Tensor nll_loss2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor multi_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar p, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar margin, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor multi_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/multi_margin_loss_backward.h +// Parsed from ATen/ops/nll_loss2d_backward.h // #pragma once @@ -54511,23 +40341,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor multi_margin_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByRef Tensor multi_margin_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin); -// aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor multi_margin_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByRef Tensor grad_input); +// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight); + + +// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight, @ByRef Tensor grad_input); -// aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor multi_margin_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor multi_margin_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef Scalar p, @Const @ByRef Scalar margin); +// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor nll_loss2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight); +// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight, @ByRef Tensor grad_input); + -// Parsed from ATen/ops/multilabel_margin_loss.h +// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor +@Namespace("at") public static native @ByVal Tensor nll_loss2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight); + + +// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor +@Namespace("at") public static native @ByVal Tensor nll_loss2d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight); + + + + + +// Parsed from ATen/ops/nll_loss2d_forward.h // #pragma once @@ -54548,23 +40392,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor multilabel_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByRef Tensor multilabel_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor multilabel_margin_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor out); +// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) 
total_weight) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss2d_forward_out(@ByRef Tensor output, @ByRef Tensor total_weight, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index); -// aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor multilabel_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor multilabel_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss2d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @ByRef Tensor output, @ByRef Tensor total_weight); +// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss2d_forward_symint_out(@ByRef Tensor output, @ByRef Tensor total_weight, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index); -// Parsed from ATen/ops/multilabel_margin_loss_backward.h + +// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss2d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @ByRef Tensor output, @ByRef Tensor total_weight); + + +// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss2d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index); + + +// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss2d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index); + + + + + +// Parsed from ATen/ops/nll_loss_backward.h // #pragma once @@ -54585,21 +40443,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor multilabel_margin_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @Const @ByRef Tensor is_target); -// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor multilabel_margin_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @Const @ByRef Tensor is_target, @ByRef Tensor grad_input); +// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight); -// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor -@Namespace("at") public static native @ByVal Tensor multilabel_margin_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @Const @ByRef Tensor is_target); +// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight, @ByRef Tensor grad_input); +// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight); + -// Parsed from ATen/ops/multilabel_margin_loss_forward.h +// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nll_loss_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight, @ByRef Tensor grad_input); + + +// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? 
weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor +@Namespace("at") public static native @ByVal Tensor nll_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight); + + +// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor +@Namespace("at") public static native @ByVal Tensor nll_loss_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight); + + + + + +// Parsed from ATen/ops/nll_loss_forward.h // #pragma once @@ -54620,21 +40494,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer multilabel_margin_loss_forward_out(@ByRef Tensor output, @ByRef Tensor is_target, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); -// aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer multilabel_margin_loss_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor output, @ByRef Tensor is_target); +// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss_forward_out(@ByRef Tensor output, @ByRef Tensor total_weight, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index); -// aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target) -@Namespace("at") public static native @ByVal TensorTensorTuple multilabel_margin_loss_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); + +// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @ByRef Tensor output, @ByRef Tensor total_weight); +// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) 
total_weight) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss_forward_symint_out(@ByRef Tensor output, @ByRef Tensor total_weight, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index); -// Parsed from ATen/ops/multinomial.h +// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @ByRef Tensor output, @ByRef Tensor total_weight); + + +// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index); + + +// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) +@Namespace("at") public static native @ByVal T_TensorTensor_T nll_loss_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index); + + + + + +// Parsed from ATen/ops/nll_loss_nd.h // #pragma once @@ -54655,23 +40545,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor multinomial_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long num_samples, @Cast("bool") boolean replacement/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor multinomial_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long num_samples); -// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor multinomial_outf(@Const @ByRef Tensor self, @Cast("int64_t") long num_samples, @Cast("bool") boolean replacement, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor +@Namespace("at") public static native @ByVal Tensor nll_loss_nd(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/); +@Namespace("at") public static native @ByVal Tensor nll_loss_nd(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? 
generator=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor multinomial(@Const @ByRef Tensor self, @Cast("int64_t") long num_samples, @Cast("bool") boolean replacement/*=false*/, @ByVal(nullValue = "c10::optional<at::Generator>(c10::nullopt)") GeneratorOptional generator);
-@Namespace("at") public static native @ByVal Tensor multinomial(@Const @ByRef Tensor self, @Cast("int64_t") long num_samples);
+
+// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
+@Namespace("at") public static native @ByVal Tensor nll_loss_nd(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/);
+@Namespace("at") public static native @ByVal Tensor nll_loss_nd(@Const @ByRef Tensor self, @Const @ByRef Tensor target);
+
+// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
+@Namespace("at") public static native @ByVal Tensor nll_loss_nd_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index);
+@Namespace("at") public static native @ByVal Tensor nll_loss_nd_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target);


-// Parsed from ATen/ops/multiply.h
+
+// Parsed from ATen/ops/nonzero.h

// #pragma once

@@ -54692,22 +40582,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/multiply.h>
-
+// #include <ATen/ops/nonzero.h>

-// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor

-// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor multiply_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other);
-// aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor multiply_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out);
+// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor nonzero_outf(@Const @ByRef Tensor self, @ByRef Tensor out);

-// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
+// aten::nonzero(Tensor self) -> Tensor
+@Namespace("at") public static native @ByVal Tensor nonzero(@Const @ByRef Tensor self);


-// Parsed from ATen/ops/mv.h
+// Parsed from ATen/ops/nonzero_numpy.h

// #pragma once

@@ -54728,21 +40617,16 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/mv.h>
-
+// #include <ATen/ops/nonzero_numpy.h>

-// aten::mv(Tensor self, Tensor vec) -> Tensor
-@Namespace("at") public static native @ByVal Tensor mv(@Const @ByRef Tensor self, @Const @ByRef Tensor vec);

-// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor mv_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor vec);
-// aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor mv_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor vec, @ByRef Tensor out);
+// aten::nonzero_numpy(Tensor self) -> Tensor[]
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector nonzero_numpy(@Const @ByRef Tensor self);


-// Parsed from ATen/ops/mvlgamma.h
+// Parsed from ATen/ops/norm.h

// #pragma once

@@ -54763,21 +40647,75 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/mvlgamma.h>
+// #include <ATen/ops/norm.h>

-// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor mvlgamma_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long p); -// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor mvlgamma_outf(@Const @ByRef Tensor self, @Cast("int64_t") long p, @ByRef Tensor out); +// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, ScalarType dtype); -// aten::mvlgamma(Tensor self, int p) -> Tensor -@Namespace("at") public static native @ByVal Tensor mvlgamma(@Const @ByRef Tensor self, @Cast("int64_t") long p); +// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self); + +// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype); + +// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype); +// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype, @ByRef Tensor out); +// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); + +// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim); + +// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); +// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype, @ByRef Tensor out); + +// aten::norm.names_out(Tensor self, Scalar? 
p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim); +// aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); + +// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, ScalarType dtype); +// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, ScalarType dtype, @ByRef Tensor out); + +// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); +@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar p, @ByRef Tensor out); -// Parsed from ATen/ops/nan_to_num.h + + + +// Parsed from ATen/ops/norm_except_dim.h // #pragma once @@ -54798,27 +40736,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor nan_to_num(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional nan, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional posinf, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional neginf); -@Namespace("at") public static native @ByVal Tensor nan_to_num(@Const @ByRef Tensor self); +// #include -// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nan_to_num_(@ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional nan, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional posinf, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional neginf); -@Namespace("at") public static native @ByRef Tensor nan_to_num_(@ByRef Tensor self); -// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor nan_to_num_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional nan, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional posinf, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional neginf); -@Namespace("at") public static native @ByRef Tensor nan_to_num_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nan_to_num_outf(@Const @ByRef Tensor self, @ByVal DoubleOptional nan, @ByVal DoubleOptional posinf, @ByVal DoubleOptional neginf, @ByRef Tensor out); +// aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor norm_except_dim(@Const @ByRef Tensor v, @Cast("int64_t") long pow/*=2*/, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal Tensor norm_except_dim(@Const @ByRef Tensor v); -// Parsed from ATen/ops/nanmean.h +// Parsed from ATen/ops/normal.h // #pragma once @@ -54839,26 +40767,91 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor nanmean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor nanmean(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor nanmean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +// aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor normal_functional(@Const @ByRef Tensor self, double mean/*=0*/, double std/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor normal_functional(@Const @ByRef Tensor self); -// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nanmean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor nanmean_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor nanmean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nanmean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor nanmean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, @Const @ByRef Tensor mean, double std/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_outf(@Const @ByRef Tensor mean, double std, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor normal(@Const @ByRef Tensor mean, double std/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor normal(@Const @ByRef Tensor mean); +// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, double mean, @Const @ByRef Tensor std, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_outf(double mean, @Const @ByRef Tensor std, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor normal(double mean, @Const @ByRef Tensor std, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor normal(double mean, @Const @ByRef Tensor std); -// Parsed from ATen/ops/nanmedian.h +// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, @Const @ByRef Tensor mean, @Const @ByRef Tensor std, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_outf(@Const @ByRef Tensor mean, @Const @ByRef Tensor std, @ByVal GeneratorOptional generator, @ByRef Tensor out); + +// aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? 
generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor normal(@Const @ByRef Tensor mean, @Const @ByRef Tensor std, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor normal(@Const @ByRef Tensor mean, @Const @ByRef Tensor std); + +// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); + + +// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + + +// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor normal_symint(double mean, double std, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor normal_symint(double mean, double std, @ByVal SymIntArrayRef size); + + +// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor normal_symint(double mean, double std, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + + +// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
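// [Editorial usage sketch, not part of the generated diff.] The normal()
// factory above is overloaded for scalar mean/std with an explicit size, for
// per-element mean/std tensors, and for SymInt sizes via normal_symint. A
// minimal sketch of the first two forms; shapes and values are illustrative:
Tensor gauss = torch.normal(0.0, 1.0, 2, 3); // 2x3 draw from N(0, 1), default options
Tensor perElem = torch.normal(torch.ones(2, 3), torch.ones(2, 3)); // elementwise mean/std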
+@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, double mean, double std, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); + + + +// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_outf(double mean, double std, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor normal_outf(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); + + +// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_symint_out(@ByRef Tensor out, double mean, double std, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor normal_symint_out(@ByRef Tensor out, double mean, double std, @ByVal SymIntArrayRef size); + + + +// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_symint_outf(double mean, double std, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); + + +// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, @Const @ByRef Tensor self, double mean/*=0*/, double std/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor normal_outf(@Const @ByRef Tensor self, double mean, double std, @ByVal GeneratorOptional generator, @ByRef Tensor out); + + + + +// Parsed from ATen/ops/not_equal.h // #pragma once @@ -54879,41 +40872,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::nanmedian(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor nanmedian(@Const @ByRef Tensor self); +// #include -// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple nanmedian(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple nanmedian(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nanmedian_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nanmedian_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nanmedian_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); +// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor not_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor not_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple nanmedian(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple nanmedian(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor not_equal(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nanmedian_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nanmedian_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nanmedian_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByRef Tensor values, @ByRef Tensor indices); +// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor not_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor not_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nanmedian_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::nanmedian.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nanmedian_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor not_equal(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/nanquantile.h +// Parsed from ATen/ops/nuclear_norm.h // #pragma once @@ -54934,33 +40915,38 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor -@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); -@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q); +// aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self); -// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); -@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q); -// aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nanquantile_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); +// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nuclear_norm_outf(@Const @ByRef Tensor self, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::nanquantile.scalar(Tensor self, float q, int? 
dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor -@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); -@Namespace("at") public static native @ByVal Tensor nanquantile(@Const @ByRef Tensor self, double q); +// aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); -@Namespace("at") public static native @ByRef Tensor nanquantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q); -// aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nanquantile_outf(@Const @ByRef Tensor self, double q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); +// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
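// [Editorial usage sketch, not part of the generated diff.] not_equal() above
// returns a boolean mask, its Scalar overload compares against a constant, and
// the _out forms write into an existing tensor. Assumes the generated
// Scalar(double) constructor:
Tensor a = torch.ones(2, 2);
Tensor b = torch.ones(2, 2);
Tensor mask = torch.not_equal(a, b);           // elementwise a != b, all false here
torch.not_equal_out(mask, a, new Scalar(0.0)); // reuse mask for a scalar comparison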
+@Namespace("at") public static native @ByRef Tensor nuclear_norm_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor nuclear_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// Parsed from ATen/ops/nansum.h +// Parsed from ATen/ops/numpy_T.h // #pragma once @@ -54981,26 +40967,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor nansum(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor nansum(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor nansum(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nansum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor nansum_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor nansum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
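// [Editorial usage sketch, not part of the generated diff.] nuclear_norm()
// bound above sums the singular values of a matrix; the varargs overload picks
// the pair of dimensions (int[2]) to reduce over for batched inputs:
Tensor m = torch.ones(4, 5);
Tensor n = torch.nuclear_norm(m);        // keepdim defaults to false
Tensor nd = torch.nuclear_norm(m, 0, 1); // explicit dim pair via the long... overload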
-@Namespace("at") public static native @ByRef Tensor nansum_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor nansum_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/narrow.h +// Parsed from ATen/ops/one_hot.h // #pragma once @@ -55021,29 +40995,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor narrow(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long length); - - -// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor narrow_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt length); - - -// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor narrow(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor start, @Cast("int64_t") long length); - +// #include -// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor narrow_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor start, @ByVal SymInt length); +// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor one_hot(@Const @ByRef Tensor self, @Cast("int64_t") long num_classes/*=-1*/); +@Namespace("at") public static native @ByVal Tensor one_hot(@Const @ByRef Tensor self); -// Parsed from ATen/ops/narrow_copy.h +// Parsed from ATen/ops/ones.h // #pragma once @@ -55064,37 +41026,68 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor -@Namespace("at") public static native @ByVal Tensor narrow_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long length); +// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor -@Namespace("at") public static native @ByVal Tensor narrow_copy_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt length); +// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor ones(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor narrow_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long length); +// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor ones_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor ones_symint(@ByVal SymIntArrayRef size); -// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor narrow_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long length, @ByRef Tensor out); +// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor ones_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor narrow_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt length); +// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor narrow_copy_symint_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt length, @ByRef Tensor out); +// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ones_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size); -// Parsed from ATen/ops/native_batch_norm.h +// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ones_symint_outf(@ByVal SymIntArrayRef size, @ByRef Tensor out); + + +// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); + + + + +// Parsed from ATen/ops/ones_like.h // #pragma once @@ -55115,21 +41108,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_batch_norm(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double momentum, double eps); +// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor ones_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor ones_like(@Const @ByRef Tensor self); +// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor ones_like(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_batch_norm_out(@ByRef Tensor out, @ByRef Tensor save_mean, @ByRef Tensor save_invstd, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double momentum, double eps); -// aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_batch_norm_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Cast("bool") boolean training, double momentum, double eps, @ByRef Tensor out, @ByRef Tensor save_mean, @ByRef Tensor save_invstd); +// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ones_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor ones_like_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ones_like_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// Parsed from ATen/ops/native_batch_norm_backward.h +// Parsed from ATen/ops/or.h // #pragma once @@ -55150,21 +41147,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_batch_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_invstd, @Cast("bool") boolean train, double eps, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor __or__(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_batch_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_invstd, @Cast("bool") boolean train, double eps, @ByVal @Cast("std::array*") BoolPointer output_mask); -// aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_batch_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional running_mean, @Const @ByRef TensorOptional running_var, @Const @ByRef TensorOptional save_mean, @Const @ByRef TensorOptional save_invstd, @Cast("bool") boolean train, double eps, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor __or__(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/native_channel_shuffle.h +// Parsed from ATen/ops/orgqr.h // #pragma once @@ -55185,16 +41180,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::native_channel_shuffle(Tensor self, int groups) -> Tensor -@Namespace("at") public static native @ByVal Tensor native_channel_shuffle(@Const @ByRef Tensor self, @Cast("int64_t") long groups); +// aten::orgqr(Tensor self, Tensor input2) -> Tensor +@Namespace("at") public static native @ByVal Tensor orgqr(@Const @ByRef Tensor self, @Const @ByRef Tensor input2); + +// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor orgqr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor input2); +// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) 
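// [Editorial usage sketch, not part of the generated diff.] The ones() and
// ones_like() factories above mirror the aten schemas: varargs sizes map to
// the int[]/SymInt[] overloads, and ones_like copies shape and options from
// its input:
Tensor filled = torch.ones(2, 3);      // varargs size overload
Tensor same = torch.ones_like(filled); // same shape, dtype and layout
torch.ones_out(filled, 4, 4);          // refill (and resize) an existing tensor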
+@Namespace("at") public static native @ByRef Tensor orgqr_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @ByRef Tensor out); -// Parsed from ATen/ops/native_dropout.h +// Parsed from ATen/ops/ormqr.h // #pragma once @@ -55215,21 +41215,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple native_dropout(@Const @ByRef Tensor input, double p, @ByVal BoolOptional train); +// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ormqr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean transpose/*=false*/); +@Namespace("at") public static native @ByRef Tensor ormqr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3); +// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor ormqr_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Cast("bool") boolean left, @Cast("bool") boolean transpose, @ByRef Tensor out); -// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_dropout_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor input, double p, @ByVal BoolOptional train); -// aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_dropout_outf(@Const @ByRef Tensor input, double p, @ByVal BoolOptional train, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor ormqr(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean transpose/*=false*/); +@Namespace("at") public static native @ByVal Tensor ormqr(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3); -// Parsed from ATen/ops/native_dropout_backward.h +// Parsed from ATen/ops/outer.h // #pragma once @@ -55250,21 +41252,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor -@Namespace("at") public static native @ByVal Tensor native_dropout_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor mask, double scale); +// aten::outer(Tensor self, Tensor vec2) -> Tensor +@Namespace("at") public static native @ByVal Tensor outer(@Const @ByRef Tensor self, @Const @ByRef Tensor vec2); -// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) 
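// [Editorial usage sketch, not part of the generated diff.] For the outer()
// bindings in the ATen/ops/outer.h section just below: the op takes two 1-D
// tensors and returns their outer product; outer_out() reuses a destination.
Tensor u = torch.ones(3);
Tensor v = torch.ones(4);
Tensor prod = torch.outer(u, v); // 3x4 matrix of pairwise products
torch.outer_out(prod, u, v);     // same computation into an existing tensor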
-@Namespace("at") public static native @ByRef Tensor native_dropout_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor mask, double scale); -// aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor native_dropout_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor mask, double scale, @ByRef Tensor out); +// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor outer_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor vec2); +// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor outer_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor vec2, @ByRef Tensor out); -// Parsed from ATen/ops/native_group_norm.h +// Parsed from ATen/ops/output_nr.h // #pragma once @@ -55285,37 +41287,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_group_norm(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, double eps); - - -// aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_group_norm_symint(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, double eps); - - -// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_group_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, double eps); - - -// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_group_norm_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); - - -// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_group_norm_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, double eps); - +// #include -// aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_group_norm_symint_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/native_group_norm_backward.h +// Parsed from ATen/ops/pad.h // #pragma once @@ -55336,37 +41315,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_group_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_group_norm_backward_symint(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_group_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array*") BoolPointer output_mask); - - -// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_group_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Cast("int64_t") long N, @Cast("int64_t") long C, @Cast("int64_t") long HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// #include -// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_group_norm_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array*") BoolPointer output_mask); +// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal LongArrayRef pad, @ByVal(nullValue = "c10::string_view(\"constant\")") @Cast("c10::string_view*") Pointer mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); +@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal LongArrayRef pad); +@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @ByVal(nullValue = "c10::string_view(\"constant\")") @Cast("c10::string_view*") Pointer mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); +@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad); -// aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_group_norm_backward_symint_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @ByVal SymInt N, @ByVal SymInt C, @ByVal SymInt HxW, @Cast("int64_t") long group, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? 
value=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor pad_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef pad, @ByVal(nullValue = "c10::string_view(\"constant\")") @Cast("c10::string_view*") Pointer mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); +@Namespace("at") public static native @ByVal Tensor pad_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef pad); -// Parsed from ATen/ops/native_layer_norm.h +// Parsed from ATen/ops/pad_sequence.h // #pragma once @@ -55387,40 +41354,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_layer_norm(@Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); - - -// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_layer_norm_symint(@Const @ByRef Tensor input, @ByVal SymIntRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); - - -// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps); - - -// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
-// Parsed from ATen/ops/native_layer_norm.h
+// Parsed from ATen/ops/pad_sequence.h

 // #pragma once

@@ -55387,40 +41354,17 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
-
-// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_layer_norm(@Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps);
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_layer_norm(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps);
-
-
-// aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_layer_norm_symint(@Const @ByRef Tensor input, @ByVal SymIntRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps);
-
-
-// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps);
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps);
-
-
-// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_outf(@Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_outf(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);
-
-
-// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor input, @ByVal SymIntRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps);
+
+// #include

-// aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_symint_outf(@Const @ByRef Tensor input, @ByVal SymIntRef normalized_shape, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, double eps, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);
+// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
+@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal @Cast("at::TensorList*") TensorArrayRef sequences, @Cast("bool") boolean batch_first/*=false*/, double padding_value/*=0.0*/);
+@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal @Cast("at::TensorList*") TensorArrayRef sequences);

-// Parsed from ATen/ops/native_layer_norm_backward.h
+// Parsed from ATen/ops/pairwise_distance.h

 // #pragma once

@@ -55441,40 +41385,48 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
+// #include

-// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_layer_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask);
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_layer_norm_backward(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask);
+// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor pairwise_distance(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2, double p/*=2*/, double eps/*=1e-06*/, @Cast("bool") boolean keepdim/*=false*/);
+@Namespace("at") public static native @ByVal Tensor pairwise_distance(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2);

-// aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
-@Namespace("at") public static native @ByVal TensorTensorTensorTuple native_layer_norm_backward_symint(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal SymIntRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask);

-// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask);
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_backward_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask);

+// Parsed from ATen/ops/pdist.h
+// #pragma once

-// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast("c10::ArrayRef*") LongArrayRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_backward_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);

+// @generated by torchgen/gen.py from Function.h
+
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include

-// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_backward_symint_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal SymIntRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask);

+// #include

-// aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer native_layer_norm_backward_symint_outf(@Const @ByRef Tensor grad_out, @Const @ByRef Tensor input, @ByVal SymIntRef normalized_shape, @Const @ByRef Tensor mean, @Const @ByRef Tensor rstd, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @ByVal @Cast("std::array*") BoolPointer output_mask, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);

+// aten::pdist(Tensor self, float p=2) -> Tensor
+@Namespace("at") public static native @ByVal Tensor pdist(@Const @ByRef Tensor self, double p/*=2*/);
+@Namespace("at") public static native @ByVal Tensor pdist(@Const @ByRef Tensor self);

-// Parsed from ATen/ops/native_norm.h
+// Parsed from ATen/ops/permute.h

 // #pragma once

@@ -55495,34 +41447,17 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
-
-// aten::native_norm(Tensor self, Scalar p=2) -> Tensor
-@Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p);
-@Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self);
-
-// aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
-@Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype);
-@Namespace("at") public static native @ByVal Tensor native_norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype);

+// #include

-// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p);
-@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor native_norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar p, @ByRef Tensor out);
-// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype);
-@Namespace("at") public static native @ByRef Tensor native_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype);
-// aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor native_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor native_norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);

+// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
+@Namespace("at") public static native @ByVal Tensor permute(@Const @ByRef Tensor self, @ByVal LongArrayRef dims);
+@Namespace("at") public static native @ByVal Tensor permute(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims);
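Likewise, a hedged sketch of the permute binding (same assumed scaffolding as the pad example above):

  Tensor x = ones(2, 3, 4);
  Tensor y = permute(x, 2, 0, 1);   // long... varargs overload; shape becomes 4x2x3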
-@Namespace("at") public static native @ByRef Tensor ne_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// #include -// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor ne(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ne_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ne_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::permute_copy(Tensor self, int[] dims) -> Tensor +@Namespace("at") public static native @ByVal Tensor permute_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef dims); +@Namespace("at") public static native @ByVal Tensor permute_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor ne(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor permute_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dims); +@Namespace("at") public static native @ByRef Tensor permute_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor permute_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor permute_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); -// Parsed from ATen/ops/neg.h +// Parsed from ATen/ops/pin_memory.h // #pragma once @@ -55586,24 +41516,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::neg(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor neg(@Const @ByRef Tensor self); +// #include -// aten::neg_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor neg_(@ByRef Tensor self); -// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor neg_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor neg_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/negative.h +// Parsed from ATen/ops/pinverse.h // #pragma once @@ -55624,24 +41544,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::negative(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor negative(@Const @ByRef Tensor self); +// #include -// aten::negative_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor negative_(@ByRef Tensor self); -// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor negative_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor negative_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor +@Namespace("at") public static native @ByVal Tensor pinverse(@Const @ByRef Tensor self, double rcond/*=1e-15*/); +@Namespace("at") public static native @ByVal Tensor pinverse(@Const @ByRef Tensor self); -// Parsed from ATen/ops/nested_to_padded_tensor.h +// Parsed from ATen/ops/pixel_shuffle.h // #pragma once @@ -55662,18 +41575,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor nested_to_padded_tensor(@Const @ByRef Tensor self, double padding, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional output_size); -@Namespace("at") public static native @ByVal Tensor nested_to_padded_tensor(@Const @ByRef Tensor self, double padding); -@Namespace("at") public static native @ByVal Tensor nested_to_padded_tensor(@Const @ByRef Tensor self, double padding, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +// aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor +@Namespace("at") public static native @ByVal Tensor pixel_shuffle(@Const @ByRef Tensor self, @Cast("int64_t") long upscale_factor); + +// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor pixel_shuffle_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long upscale_factor); +// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor pixel_shuffle_outf(@Const @ByRef Tensor self, @Cast("int64_t") long upscale_factor, @ByRef Tensor out); -// Parsed from ATen/ops/new_empty.h +// Parsed from ATen/ops/pixel_unshuffle.h // #pragma once @@ -55694,34 +41610,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - - - - -// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor new_empty_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor new_empty_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor new_empty_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor new_empty_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); - - -// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor new_empty_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size); +// #include -// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) 
-// Parsed from ATen/ops/new_empty.h
+// Parsed from ATen/ops/pixel_unshuffle.h

 // #pragma once

@@ -55694,34 +41610,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
-
-
-
-
-// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor new_empty_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
-@Namespace("at") public static native @ByRef Tensor new_empty_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
-
-
-// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor new_empty_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor new_empty_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out);
-
-
-// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor new_empty_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size);

+// #include

-// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor new_empty_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByRef Tensor out);

+// aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
+@Namespace("at") public static native @ByVal Tensor pixel_unshuffle(@Const @ByRef Tensor self, @Cast("int64_t") long downscale_factor);

+// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor pixel_unshuffle_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long downscale_factor);
+// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor pixel_unshuffle_outf(@Const @ByRef Tensor self, @Cast("int64_t") long downscale_factor, @ByRef Tensor out);

-// Parsed from ATen/ops/new_empty_strided.h
+// Parsed from ATen/ops/poisson.h

 // #pragma once

@@ -55742,34 +41645,23 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
-
-
-
-
-// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor new_empty_strided_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride);
-@Namespace("at") public static native @ByRef Tensor new_empty_strided_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride);
-
-
-// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor new_empty_strided_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor new_empty_strided_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out);
-
-
-// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor new_empty_strided_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride);

+// #include

-// aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor new_empty_strided_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByRef Tensor out);

+// aten::poisson(Tensor self, Generator? generator=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor poisson(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator);
+@Namespace("at") public static native @ByVal Tensor poisson(@Const @ByRef Tensor self);

+// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor poisson_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator);
+@Namespace("at") public static native @ByRef Tensor poisson_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor poisson_outf(@Const @ByRef Tensor self, @ByVal GeneratorOptional generator, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor new_ones_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor new_ones_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); - - -// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor new_ones_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size); +// #include -// aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor new_ones_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByRef Tensor out); +// aten::polar(Tensor abs, Tensor angle) -> Tensor +@Namespace("at") public static native @ByVal Tensor polar(@Const @ByRef Tensor abs, @Const @ByRef Tensor angle); +// aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor polar_out(@ByRef Tensor out, @Const @ByRef Tensor abs, @Const @ByRef Tensor angle); +// aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor polar_outf(@Const @ByRef Tensor abs, @Const @ByRef Tensor angle, @ByRef Tensor out); -// Parsed from ATen/ops/new_zeros.h +// Parsed from ATen/ops/polygamma.h // #pragma once @@ -55886,34 +41747,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - - - - -// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor new_zeros_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor new_zeros_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor new_zeros_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor new_zeros_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); - - -// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor new_zeros_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size); +// #include -// aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor new_zeros_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByRef Tensor out); +// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor polygamma_out(@ByRef Tensor out, @Cast("int64_t") long n, @Const @ByRef Tensor self); +// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor polygamma_outf(@Cast("int64_t") long n, @Const @ByRef Tensor self, @ByRef Tensor out); +// aten::polygamma(int n, Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor polygamma(@Cast("int64_t") long n, @Const @ByRef Tensor self); -// Parsed from ATen/ops/nextafter.h +// Parsed from ATen/ops/positive.h // #pragma once @@ -55934,21 +41782,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nextafter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nextafter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::nextafter(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor nextafter(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::positive(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor positive(@Const @ByRef Tensor self); -// Parsed from ATen/ops/nll_loss.h +// Parsed from ATen/ops/pow.h // #pragma once @@ -55969,41 +41812,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/); -@Namespace("at") public static native @ByRef Tensor nll_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); - - -// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @ByRef Tensor out); - - -// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index); -@Namespace("at") public static native @ByRef Tensor nll_loss_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// #include -// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @ByRef Tensor out); +// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) 
-// Parsed from ATen/ops/nextafter.h
+// Parsed from ATen/ops/positive.h

 // #pragma once

@@ -55934,21 +41782,16 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-

+// #include

-// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor nextafter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other);
-// aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor nextafter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out);
-// aten::nextafter(Tensor self, Tensor other) -> Tensor
-@Namespace("at") public static native @ByVal Tensor nextafter(@Const @ByRef Tensor self, @Const @ByRef Tensor other);

+// aten::positive(Tensor(a) self) -> Tensor(a)
+@Namespace("at") public static native @ByVal Tensor positive(@Const @ByRef Tensor self);

-// Parsed from ATen/ops/nll_loss.h
+// Parsed from ATen/ops/pow.h

 // #pragma once

@@ -55969,41 +41812,37 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
-
-// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor nll_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/);
-@Namespace("at") public static native @ByRef Tensor nll_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target);
-
-
-// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor nll_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @ByRef Tensor out);
-
-
-// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor nll_loss_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index);
-@Namespace("at") public static native @ByRef Tensor nll_loss_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target);

+// #include

-// aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor nll_loss_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @ByRef Tensor out);

+// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor pow_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor exponent);
+// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor pow_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor exponent, @ByRef Tensor out);
+// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor
+@Namespace("at") public static native @ByVal Tensor pow(@Const @ByRef Tensor self, @Const @ByRef Tensor exponent);

-// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
-@Namespace("at") public static native @ByVal Tensor nll_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/);
-@Namespace("at") public static native @ByVal Tensor nll_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target);

+// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor pow_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor exponent);
+// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor pow_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor exponent, @ByRef Tensor out);
+// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor
+@Namespace("at") public static native @ByVal Tensor pow(@Const @ByRef Scalar self, @Const @ByRef Tensor exponent);

-// aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor
-@Namespace("at") public static native @ByVal Tensor nll_loss_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index);
-@Namespace("at") public static native @ByVal Tensor nll_loss_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target);

+// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor pow_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar exponent);
+// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor pow_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar exponent, @ByRef Tensor out);
+// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor
+@Namespace("at") public static native @ByVal Tensor pow(@Const @ByRef Tensor self, @Const @ByRef Scalar exponent);
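The three pow variants cover Tensor^Tensor, Scalar^Tensor, and Tensor^Scalar. A sketch of two of the call shapes (assuming the Scalar(double) constructor the presets expose, plus the scaffolding from the pad example):

  Tensor x = ones(2, 2);
  Tensor squared = pow(x, new Scalar(2.0));   // Tensor_Scalar overload
  Tensor xToX = pow(x, x);                    // Tensor_Tensor overload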
-@Namespace("at") public static native @ByRef Tensor nll_loss2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/); -@Namespace("at") public static native @ByRef Tensor nll_loss2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); - - -// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @ByRef Tensor out); - - -// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index); -@Namespace("at") public static native @ByRef Tensor nll_loss2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); - - -// aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @ByRef Tensor out); - - -// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor -@Namespace("at") public static native @ByVal Tensor nll_loss2d(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/); -@Namespace("at") public static native @ByVal Tensor nll_loss2d(@Const @ByRef Tensor self, @Const @ByRef Tensor target); - +// #include -// aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor -@Namespace("at") public static native @ByVal Tensor nll_loss2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index); -@Namespace("at") public static native @ByVal Tensor nll_loss2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::prelu(Tensor self, Tensor weight) -> Tensor +@Namespace("at") public static native @ByVal Tensor prelu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight); -// Parsed from ATen/ops/nll_loss2d_backward.h +// Parsed from ATen/ops/prod.h // #pragma once @@ -56079,37 +41893,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? 
weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight); - - -// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight, @ByRef Tensor grad_input); - - -// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight); +// #include -// aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nll_loss2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight, @ByRef Tensor grad_input); +// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self); +// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor -@Namespace("at") public static native @ByVal Tensor nll_loss2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight); +// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor prod_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor -@Namespace("at") public static native @ByVal Tensor nll_loss2d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight); +// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor prod_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor prod_outf(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/nll_loss2d_forward.h +// Parsed from ATen/ops/promote_types.h // #pragma once @@ -56130,37 +41950,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) 
-// Parsed from ATen/ops/nll_loss2d_forward.h
+// Parsed from ATen/ops/promote_types.h

 // #pragma once

@@ -56130,37 +41950,16 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
-
-// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nll_loss2d_forward_out(@ByRef Tensor output, @ByRef Tensor total_weight, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index);
-
-
-// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nll_loss2d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @ByRef Tensor output, @ByRef Tensor total_weight);
-
-
-// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nll_loss2d_forward_symint_out(@ByRef Tensor output, @ByRef Tensor total_weight, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index);
-
-
-// aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
-@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nll_loss2d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @ByRef Tensor output, @ByRef Tensor total_weight);
-
-
-// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
-@Namespace("at") public static native @ByVal TensorTensorTuple nll_loss2d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index);
-

+// #include

-// aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight)
-@Namespace("at") public static native @ByVal TensorTensorTuple nll_loss2d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index);

+// aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType
+@Namespace("at") public static native ScalarType promote_types(ScalarType type1, ScalarType type2);

-// Parsed from ATen/ops/nll_loss_backward.h
+// Parsed from ATen/ops/put.h

 // #pragma once

@@ -56181,37 +41980,23 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
-
-
-// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor nll_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight);
-
-
-// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor nll_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight, @ByRef Tensor grad_input);
-
-
-// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor nll_loss_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight);
-
-
-// aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor nll_loss_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight, @ByRef Tensor grad_input);
-
-
-// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
-@Namespace("at") public static native @ByVal Tensor nll_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @Const @ByRef Tensor total_weight);

+// #include

-// aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, Tensor total_weight) -> Tensor
-@Namespace("at") public static native @ByVal Tensor nll_loss_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @Const @ByRef Tensor total_weight);

+// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor put(@Const @ByRef Tensor self, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Cast("bool") boolean accumulate/*=false*/);
+@Namespace("at") public static native @ByVal Tensor put(@Const @ByRef Tensor self, @Const @ByRef Tensor index, @Const @ByRef Tensor source);

+// aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor put_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Cast("bool") boolean accumulate/*=false*/); +@Namespace("at") public static native @ByRef Tensor put_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor index, @Const @ByRef Tensor source); +// aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor put_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Cast("bool") boolean accumulate, @ByRef Tensor out); -// Parsed from ATen/ops/nll_loss_forward.h +// Parsed from ATen/ops/q_per_channel_axis.h // #pragma once @@ -56232,37 +42017,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nll_loss_forward_out(@ByRef Tensor output, @ByRef Tensor total_weight, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index); - - -// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nll_loss_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index, @ByRef Tensor output, @ByRef Tensor total_weight); - - -// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nll_loss_forward_symint_out(@ByRef Tensor output, @ByRef Tensor total_weight, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index); - - -// aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer nll_loss_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index, @ByRef Tensor output, @ByRef Tensor total_weight); - - -// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) -@Namespace("at") public static native @ByVal TensorTensorTuple nll_loss_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @Cast("int64_t") long ignore_index); - +// #include -// aten::nll_loss_forward(Tensor self, Tensor target, Tensor? 
weight, int reduction, SymInt ignore_index) -> (Tensor output, Tensor total_weight) -@Namespace("at") public static native @ByVal TensorTensorTuple nll_loss_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef TensorOptional weight, @Cast("int64_t") long reduction, @ByVal SymInt ignore_index); +// aten::q_per_channel_axis(Tensor self) -> int +@Namespace("at") public static native @Cast("int64_t") long q_per_channel_axis(@Const @ByRef Tensor self); -// Parsed from ATen/ops/nll_loss_nd.h +// Parsed from ATen/ops/q_per_channel_scales.h // #pragma once @@ -56283,23 +42047,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor -@Namespace("at") public static native @ByVal Tensor nll_loss_nd(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @Cast("int64_t") long ignore_index/*=-100*/); -@Namespace("at") public static native @ByVal Tensor nll_loss_nd(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// #include -// aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor -@Namespace("at") public static native @ByVal Tensor nll_loss_nd_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional weight, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, @ByVal(nullValue = "c10::SymInt(-100)") SymInt ignore_index); -@Namespace("at") public static native @ByVal Tensor nll_loss_nd_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::q_per_channel_scales(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor q_per_channel_scales(@Const @ByRef Tensor self); +// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor q_per_channel_scales_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor q_per_channel_scales_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/nonzero.h +// Parsed from ATen/ops/q_per_channel_zero_points.h // #pragma once @@ -56320,21 +42082,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nonzero_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nonzero_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::q_per_channel_zero_points(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor q_per_channel_zero_points(@Const @ByRef Tensor self); -// aten::nonzero(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor nonzero(@Const @ByRef Tensor self); +// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor q_per_channel_zero_points_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
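// --- Usage sketch (not part of the generated bindings) ---------------------------
// Reading back the quantization parameters of a per-channel quantized tensor via
// the new q_per_channel_axis/scales/zero_points bindings. quantize_per_channel is
// bound further down in this same file; same imports as the put sketch above.
Tensor weights    = rand(4, 8);
Tensor scales     = ones(4).mul(new Scalar(0.1));      // one scale per channel on axis 0
Tensor zeroPoints = zeros_like(arange(new Scalar(4))); // int64 zeros, one per channel
Tensor qWeights   = quantize_per_channel(weights, scales, zeroPoints, 0, ScalarType.QInt8);
long   axis    = q_per_channel_axis(qWeights);         // 0
Tensor qScales = q_per_channel_scales(qWeights);       // float64 copy of the scales
Tensor qZeros  = q_per_channel_zero_points(qWeights);
// ----------------------------------------------------------------------------------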
+@Namespace("at") public static native @ByRef Tensor q_per_channel_zero_points_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/nonzero_numpy.h +// Parsed from ATen/ops/q_scale.h // #pragma once @@ -56355,16 +42117,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::nonzero_numpy(Tensor self) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector nonzero_numpy(@Const @ByRef Tensor self); +// aten::q_scale(Tensor self) -> float +@Namespace("at") public static native double q_scale(@Const @ByRef Tensor self); -// Parsed from ATen/ops/norm.h +// Parsed from ATen/ops/q_zero_point.h // #pragma once @@ -56385,75 +42147,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, ScalarType dtype); - -// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self); - -// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype); - -// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); - -// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype); -// aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, ScalarType dtype, @ByRef Tensor out); - -// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); - -// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); - -// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor norm(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim); - -// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype); -// aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, ScalarType dtype, @ByRef Tensor out); - -// aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim); -// aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// #include -// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, ScalarType dtype); -// aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef ScalarOptional p, ScalarType dtype, @ByRef Tensor out); -// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(2)") Scalar p); -@Namespace("at") public static native @ByRef Tensor norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor norm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar p, @ByRef Tensor out); +// aten::q_zero_point(Tensor self) -> int +@Namespace("at") public static native @Cast("int64_t") long q_zero_point(@Const @ByRef Tensor self); -// Parsed from ATen/ops/norm_except_dim.h +// Parsed from ATen/ops/qr.h // #pragma once @@ -56474,17 +42177,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor norm_except_dim(@Const @ByRef Tensor v, @Cast("int64_t") long pow/*=2*/, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal Tensor norm_except_dim(@Const @ByRef Tensor v); +// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) +@Namespace("at") public static native @ByVal T_TensorTensor_T qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor self, @Cast("bool") boolean some/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor self); +// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) 
R) +@Namespace("at") public static native @ByVal T_TensorTensor_T qr_outf(@Const @ByRef Tensor self, @Cast("bool") boolean some, @ByRef Tensor Q, @ByRef Tensor R); +// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) +@Namespace("at") public static native @ByVal T_TensorTensor_T qr(@Const @ByRef Tensor self, @Cast("bool") boolean some/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T qr(@Const @ByRef Tensor self); -// Parsed from ATen/ops/normal.h + +// Parsed from ATen/ops/qscheme.h // #pragma once @@ -56505,91 +42214,61 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor normal_functional(@Const @ByRef Tensor self, double mean/*=0*/, double std/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor normal_functional(@Const @ByRef Tensor self); - -// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, @Const @ByRef Tensor mean, double std/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -// aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_outf(@Const @ByRef Tensor mean, double std, @ByVal GeneratorOptional generator, @ByRef Tensor out); - -// aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor normal(@Const @ByRef Tensor mean, double std/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor normal(@Const @ByRef Tensor mean); - -// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, double mean, @Const @ByRef Tensor std, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -// aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_outf(double mean, @Const @ByRef Tensor std, @ByVal GeneratorOptional generator, @ByRef Tensor out); - -// aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor normal(double mean, @Const @ByRef Tensor std, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor normal(double mean, @Const @ByRef Tensor std); - -// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, @Const @ByRef Tensor mean, @Const @ByRef Tensor std, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -// aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
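// --- Usage sketch (not part of the generated bindings) ---------------------------
// Tuple-returning ops such as qr now come back as generated wrapper classes like
// T_TensorTensor_T; this assumes the wrapper's usual get0()/get1() accessors.
// Same imports as the put sketch above.
Tensor a = rand(5, 3);
T_TensorTensor_T qrResult = qr(a);   // some=true by default: reduced (economy) factorization
Tensor Q = qrResult.get0();
Tensor R = qrResult.get1();
// qr_out(Q, R, a) fills two preallocated tensors instead of allocating new ones.
// ----------------------------------------------------------------------------------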
-@Namespace("at") public static native @ByRef Tensor normal_outf(@Const @ByRef Tensor mean, @Const @ByRef Tensor std, @ByVal GeneratorOptional generator, @ByRef Tensor out); - -// aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor normal(@Const @ByRef Tensor mean, @Const @ByRef Tensor std, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor normal(@Const @ByRef Tensor mean, @Const @ByRef Tensor std); +// #include -// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor normal_symint(double mean, double std, @ByVal SymIntRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor normal_symint(double mean, double std, @ByVal SymIntRef size); +// Parsed from ATen/ops/quantile.h -// aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor normal_symint(double mean, double std, @ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// #pragma once +// @generated by torchgen/gen.py from Function.h -// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, double mean, double std, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef size, @ByVal(nullValue = "c10::optional<at::Generator>(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional<at::Generator>(c10::nullopt)") GeneratorOptional generator); - +// #include <ATen/Context.h> +// #include <ATen/DeviceGuard.h> +// #include <ATen/TensorUtils.h> +// #include <ATen/TracerMode.h> +// #include <ATen/core/Generator.h> +// #include <ATen/core/Reduction.h> +// #include <ATen/core/Tensor.h> +// #include <c10/core/Scalar.h> +// #include <c10/core/Storage.h> +// #include <c10/core/TensorOptions.h> +// #include <c10/util/Deprecated.h> +// #include <c10/util/Optional.h> -// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_outf(double mean, double std, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor normal_outf(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// #include <ATen/ops/quantile_ops.h> -// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_symint_out(@ByRef Tensor out, double mean, double std, @ByVal SymIntRef size, @ByVal(nullValue = "c10::optional<at::Generator>(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor normal_symint_out(@ByRef Tensor out, double mean, double std, @ByVal SymIntRef size); - +// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q); -// aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_symint_outf(double mean, double std, @ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)
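// --- Usage sketch (not part of the generated bindings) ---------------------------
// quantile, plus the _out/_outf pairing used throughout these bindings:
// name_out(...) takes the out tensor first and keeps the C++ defaults, while
// name_outf(...) mirrors the C++ argument order with out last and every argument
// spelled out. Same imports as the put sketch above.
Tensor x  = rand(100);
Tensor qs = rand(3);                // three probabilities in [0, 1)
Tensor r  = quantile(x, qs);        // dim=None, keepdim=false, interpolation "linear"
Tensor out = zeros(3);
quantile_out(out, x, qs);           // out-first variant; the defaults still apply
// ----------------------------------------------------------------------------------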
+@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q); +// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantile_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); +// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, double q); -// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_out(@ByRef Tensor out, @Const @ByRef Tensor self, double mean/*=0*/, double std/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -// aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor normal_outf(@Const @ByRef Tensor self, double mean, double std, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); +@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q); +// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantile_outf(@Const @ByRef Tensor self, double q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); -// Parsed from ATen/ops/not_equal.h +// Parsed from ATen/ops/quantize_per_channel.h // #pragma once @@ -56610,29 +42289,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor not_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor not_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// #include -// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor not_equal(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor not_equal_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor not_equal_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantize_per_channel(@Const @ByRef Tensor self, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, ScalarType dtype); -// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor not_equal(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantize_per_channel_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, ScalarType dtype); +// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantize_per_channel_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, ScalarType dtype, @ByRef Tensor out); -// Parsed from ATen/ops/nuclear_norm.h +// Parsed from ATen/ops/quantize_per_tensor.h // #pragma once @@ -56653,38 +42324,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self); +// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantize_per_tensor(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, ScalarType dtype); -// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor nuclear_norm_outf(@Const @ByRef Tensor self, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantize_per_tensor(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, ScalarType dtype); -// aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor nuclear_norm(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector quantize_per_tensor(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); -// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor nuclear_norm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor nuclear_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor nuclear_norm_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, ScalarType dtype); +// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_outf(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, ScalarType dtype, @ByRef Tensor out); +// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, ScalarType dtype); +// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, ScalarType dtype, @ByRef Tensor out); + +// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void quantize_per_tensor_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); +// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void quantize_per_tensor_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype, @ByVal @Cast("at::TensorList*") TensorArrayRef out); -// Parsed from ATen/ops/numpy_T.h + +// Parsed from ATen/ops/quantize_per_tensor_dynamic.h // #pragma once @@ -56705,14 +42375,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantize_per_tensor_dynamic(@Const @ByRef Tensor self, ScalarType dtype, @Cast("bool") boolean reduce_range); + +// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_dynamic_out(@ByRef Tensor out, @Const @ByRef Tensor self, ScalarType dtype, @Cast("bool") boolean reduce_range); +// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_dynamic_outf(@Const @ByRef Tensor self, ScalarType dtype, @Cast("bool") boolean reduce_range, @ByRef Tensor out); -// Parsed from ATen/ops/one_hot.h +// Parsed from ATen/ops/quantized_batch_norm.h // #pragma once @@ -56733,17 +42410,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor -@Namespace("at") public static native @ByVal Tensor one_hot(@Const @ByRef Tensor self, @Cast("int64_t") long num_classes/*=-1*/); -@Namespace("at") public static native @ByVal Tensor one_hot(@Const @ByRef Tensor self); +// aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? 
bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantized_batch_norm(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor var, double eps, double output_scale, @Cast("int64_t") long output_zero_point); + +// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantized_batch_norm_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor var, double eps, double output_scale, @Cast("int64_t") long output_zero_point); +// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantized_batch_norm_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor var, double eps, double output_scale, @Cast("int64_t") long output_zero_point, @ByRef Tensor out); -// Parsed from ATen/ops/ones.h +// Parsed from ATen/ops/quantized_gru_cell.h // #pragma once @@ -56764,68 +42445,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -// aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - -// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - - -// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor ones_symint(@ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor ones_symint(@ByVal SymIntRef size); - - -// aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor ones_symint(@ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - - -// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); - - -// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ones_symint_out(@ByRef Tensor out, @ByVal SymIntRef size); - - -// aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ones_symint_outf(@ByVal SymIntRef size, @ByRef Tensor out); +// #include -// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
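// --- Usage sketch (not part of the generated bindings) ---------------------------
// quantized_batch_norm over a per-tensor quantized NCHW input. The optional
// weight/bias parameters go through the generated TensorOptional wrapper; an
// empty TensorOptional is assumed to map to c10::nullopt (None). Same imports as
// the put sketch above; shapes and values are illustrative.
Tensor x    = rand(1, 2, 4, 4);                                  // NCHW input
Tensor qx   = quantize_per_tensor(x, 0.1, 0, ScalarType.QUInt8);
Tensor mean = zeros(2);                                          // per-channel stats
Tensor var  = ones(2);
Tensor qy   = quantized_batch_norm(qx, new TensorOptional(), new TensorOptional(),
                                   mean, var, 1e-5, 0.1, 0);     // eps, output_scale, output_zero_point
// ----------------------------------------------------------------------------------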
-@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor ones_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -// aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor ones_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); +// aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantized_gru_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef Tensor b_ih, @Const @ByRef Tensor b_hh, @Const @ByRef Tensor packed_ih, @Const @ByRef Tensor packed_hh, @Const @ByRef Tensor col_offsets_ih, @Const @ByRef Tensor col_offsets_hh, @Const @ByRef Scalar scale_ih, @Const @ByRef Scalar scale_hh, @Const @ByRef Scalar zero_point_ih, @Const @ByRef Scalar zero_point_hh); -// Parsed from ATen/ops/ones_like.h +// Parsed from ATen/ops/quantized_lstm_cell.h // #pragma once @@ -56846,25 +42475,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor ones_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor ones_like(@Const @ByRef Tensor self); -// aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor ones_like(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ones_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor ones_like_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor ones_like_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +// aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T quantized_lstm_cell(@Const @ByRef Tensor input, @ByVal @Cast("at::TensorList*") TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef Tensor b_ih, @Const @ByRef Tensor b_hh, @Const @ByRef Tensor packed_ih, @Const @ByRef Tensor packed_hh, @Const @ByRef Tensor col_offsets_ih, @Const @ByRef Tensor col_offsets_hh, @Const @ByRef Scalar scale_ih, @Const @ByRef Scalar scale_hh, @Const @ByRef Scalar zero_point_ih, @Const @ByRef Scalar zero_point_hh); -// Parsed from ATen/ops/or.h +// Parsed from ATen/ops/quantized_max_pool1d.h // #pragma once @@ -56885,19 +42505,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor __or__(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor __or__(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -// Parsed from ATen/ops/orgqr.h +// Parsed from ATen/ops/quantized_max_pool2d.h // #pragma once @@ -56918,21 +42547,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::orgqr(Tensor self, Tensor input2) -> Tensor -@Namespace("at") public static native @ByVal Tensor orgqr(@Const @ByRef Tensor self, @Const @ByRef Tensor input2); +// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, 
@ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor orgqr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor input2); -// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor orgqr_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @ByRef Tensor out); +// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef kernel_size, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -// Parsed from ATen/ops/ormqr.h +// Parsed from ATen/ops/quantized_rnn_relu_cell.h // #pragma once @@ -56953,23 +42589,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ormqr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean transpose/*=false*/); -@Namespace("at") public static native @ByRef Tensor ormqr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3); -// aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor ormqr_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Cast("bool") boolean left, @Cast("bool") boolean transpose, @ByRef Tensor out); -// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor ormqr(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Cast("bool") boolean left/*=true*/, @Cast("bool") boolean transpose/*=false*/); -@Namespace("at") public static native @ByVal Tensor ormqr(@Const @ByRef Tensor self, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3); +// aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantized_rnn_relu_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef Tensor b_ih, @Const @ByRef Tensor b_hh, @Const @ByRef Tensor packed_ih, @Const @ByRef Tensor packed_hh, @Const @ByRef Tensor col_offsets_ih, @Const @ByRef Tensor col_offsets_hh, @Const @ByRef Scalar scale_ih, @Const @ByRef Scalar scale_hh, @Const @ByRef Scalar zero_point_ih, @Const @ByRef Scalar zero_point_hh); -// Parsed from ATen/ops/outer.h +// Parsed from ATen/ops/quantized_rnn_tanh_cell.h // #pragma once @@ -56990,21 +42619,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::outer(Tensor self, Tensor vec2) -> Tensor -@Namespace("at") public static native @ByVal Tensor outer(@Const @ByRef Tensor self, @Const @ByRef Tensor vec2); -// aten::outer.out(Tensor self, 
Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor outer_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor vec2); -// aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor outer_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor vec2, @ByRef Tensor out); +// aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor +@Namespace("at") public static native @ByVal Tensor quantized_rnn_tanh_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef Tensor b_ih, @Const @ByRef Tensor b_hh, @Const @ByRef Tensor packed_ih, @Const @ByRef Tensor packed_hh, @Const @ByRef Tensor col_offsets_ih, @Const @ByRef Tensor col_offsets_hh, @Const @ByRef Scalar scale_ih, @Const @ByRef Scalar scale_hh, @Const @ByRef Scalar zero_point_ih, @Const @ByRef Scalar zero_point_hh); -// Parsed from ATen/ops/output_nr.h +// Parsed from ATen/ops/rad2deg.h // #pragma once @@ -57025,14 +42649,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::rad2deg(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor rad2deg(@Const @ByRef Tensor self); + +// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rad2deg_(@ByRef Tensor self); + +// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rad2deg_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rad2deg_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/pad.h +// Parsed from ATen/ops/rand.h // #pragma once @@ -57053,149 +42687,169 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad, @ByVal(nullValue = "c10::string_view(\"constant\")") @Cast("c10::string_view*") Pointer mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); -@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad); -@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, @ByVal(nullValue = "c10::string_view(\"constant\")") @Cast("c10::string_view*") Pointer mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); -@Namespace("at") public static native @ByVal Tensor pad(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... pad); +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); + + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal DimnameListOptional names); + + +// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); + + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -// aten::pad(Tensor self, SymInt[] pad, str mode="constant", float? value=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor pad_symint(@Const @ByRef Tensor self, @ByVal SymIntRef pad, @ByVal(nullValue = "c10::string_view(\"constant\")") @Cast("c10::string_view*") Pointer mode, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional value); -@Namespace("at") public static native @ByVal Tensor pad_symint(@Const @ByRef Tensor self, @ByVal SymIntRef pad); +// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// Parsed from ATen/ops/pad_sequence.h +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// #include +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor -@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorArrayRef sequences, @Cast("bool") boolean batch_first/*=false*/, double padding_value/*=0.0*/); -@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorArrayRef sequences); +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? 
dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// Parsed from ATen/ops/pairwise_distance.h +// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// #include +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size); -// aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor pairwise_distance(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2, double p/*=2*/, double eps/*=1e-06*/, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor pairwise_distance(@Const @ByRef Tensor x1, @Const @ByRef Tensor x2); +// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_symint_outf(@ByVal SymIntArrayRef size, @ByRef Tensor out); +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// Parsed from ATen/ops/pdist.h +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
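
Each rand factory above is emitted in two flavors, a packed TensorOptions overload and an unpacked dtype/layout/device/pin_memory overload, each in LongArrayRef, long[] and long... spellings. A minimal sketch using only the varargs overloads shown above, assuming the usual org.bytedeco.pytorch.global.torch entry point:

    // with: import static org.bytedeco.pytorch.global.torch.*;
    Tensor u = rand(2, 3);       // uniform samples in [0, 1), default dtype
    Tensor dst = rand(2, 3);
    rand_out(dst, 2, 3);         // refills a preallocated destination in place
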
+@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_symint_outf(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// #include +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -// aten::pdist(Tensor self, float p=2) -> Tensor -@Namespace("at") public static native @ByVal Tensor pdist(@Const @ByRef Tensor self, double p/*=2*/); -@Namespace("at") public static native @ByVal Tensor pdist(@Const @ByRef Tensor self); +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal DimnameListOptional names); -// Parsed from ATen/ops/permute.h +// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_symint_outf(@ByVal SymIntArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? 
generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); -// #include +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor permute(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("at") public static native @ByVal Tensor permute(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_symint_outf(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); -// Parsed from ATen/ops/permute_copy.h + +// Parsed from ATen/ops/rand_like.h // #pragma once @@ -57216,24 +42870,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::permute_copy(Tensor self, int[] dims) -> Tensor -@Namespace("at") public static native @ByVal Tensor permute_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("at") public static native @ByVal Tensor permute_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor rand_like(@Const @ByRef Tensor self); +// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rand_like(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor permute_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("at") public static native @ByRef Tensor permute_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor permute_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor permute_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); +// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor rand_like_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rand_like_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// Parsed from ATen/ops/pin_memory.h +// Parsed from ATen/ops/randint.h // #pragma once @@ -57254,182 +42909,169 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// Parsed from ATen/ops/pinverse.h +// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// #include +// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor -@Namespace("at") public static native @ByVal Tensor pinverse(@Const @ByRef Tensor self, double rcond/*=1e-15*/); -@Namespace("at") public static native @ByVal Tensor pinverse(@Const @ByRef Tensor self); +// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); +// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// Parsed from ATen/ops/pixel_shuffle.h +// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size); -// #include +// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor -@Namespace("at") public static native @ByVal Tensor pixel_shuffle(@Const @ByRef Tensor self, @Cast("int64_t") long upscale_factor); +// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor pixel_shuffle_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long upscale_factor); -// aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor pixel_shuffle_outf(@Const @ByRef Tensor self, @Cast("int64_t") long upscale_factor, @ByRef Tensor out); +// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// Parsed from ATen/ops/pixel_unshuffle.h -// #pragma once +// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// #include +// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal SymIntArrayRef size); -// aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor -@Namespace("at") public static native @ByVal Tensor pixel_unshuffle(@Const @ByRef Tensor self, @Cast("int64_t") long downscale_factor); -// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor pixel_unshuffle_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long downscale_factor); -// aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor pixel_unshuffle_outf(@Const @ByRef Tensor self, @Cast("int64_t") long downscale_factor, @ByRef Tensor out); +// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByRef Tensor out); +// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
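
Note the two out-variant spellings generated for every op: *_out takes the destination tensor first, while *_outf keeps the ATen schema order with out last. A sketch with randint, using only signatures shown above and the same entry-point assumption:

    // with: import static org.bytedeco.pytorch.global.torch.*;
    Tensor dst = randint(10, 4);               // 4 draws in [0, 10)
    randint_out(dst, 10, 4);                   // destination-first spelling
    randint_outf(10, new long[]{4}, dst);      // schema-order spelling, destination last
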
+@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// Parsed from ATen/ops/poisson.h +// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// #include +// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::poisson(Tensor self, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor poisson(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor poisson(@Const @ByRef Tensor self); +// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor poisson_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor poisson_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::poisson.out(Tensor self, Generator? 
generator=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor poisson_outf(@Const @ByRef Tensor self, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size); +// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByRef Tensor out); -// Parsed from ATen/ops/poisson_nll_loss.h -// #pragma once +// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// #include +// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
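
As in ATen, randint samples over the half-open interval [low, high) and defaults to a long (int64) dtype per the schema above. A quick sketch; the explicit long[] form sidesteps any ambiguity between the (high, size...) and (low, high, size...) varargs overloads:

    // with: import static org.bytedeco.pytorch.global.torch.*;
    Tensor dice = randint(1, 7, new long[]{100});   // 100 int64 values in 1..6
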
+@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor -@Namespace("at") public static native @ByVal Tensor poisson_nll_loss(@Const @ByRef Tensor input, @Const @ByRef Tensor target, @Cast("bool") boolean log_input, @Cast("bool") boolean full, double eps, @Cast("int64_t") long reduction); -// Parsed from ATen/ops/polar.h +// Parsed from ATen/ops/randint_like.h // #pragma once @@ -57450,21 +43092,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::polar(Tensor abs, Tensor angle) -> Tensor -@Namespace("at") public static native @ByVal Tensor polar(@Const @ByRef Tensor abs, @Const @ByRef Tensor angle); +// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high); +// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor polar_out(@ByRef Tensor out, @Const @ByRef Tensor abs, @Const @ByRef Tensor angle); -// aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor polar_outf(@Const @ByRef Tensor abs, @Const @ByRef Tensor angle, @ByRef Tensor out); +// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high); +// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); + +// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long high); +// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_like_outf(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high); +// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randint_like_outf(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// Parsed from ATen/ops/polygamma.h + +// Parsed from ATen/ops/randn.h // #pragma once @@ -57485,189 +43143,169 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor polygamma_out(@ByRef Tensor out, @Cast("int64_t") long n, @Const @ByRef Tensor self); -// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor polygamma_outf(@Cast("int64_t") long n, @Const @ByRef Tensor self, @ByRef Tensor out); +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -// aten::polygamma(int n, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor polygamma(@Cast("int64_t") long n, @Const @ByRef Tensor self); +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size); -// Parsed from ATen/ops/positive.h -// #pragma once +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// #include +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// aten::positive(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor positive(@Const @ByRef Tensor self); +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -// Parsed from ATen/ops/pow.h -// #pragma once +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal DimnameListOptional names); +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// #include +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor pow_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor exponent); -// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor pow_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor exponent, @ByRef Tensor out); -// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor -@Namespace("at") public static native @ByVal Tensor pow(@Const @ByRef Tensor self, @Const @ByRef Tensor exponent); +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor pow_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor exponent); -// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor pow_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor exponent, @ByRef Tensor out); -// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor -@Namespace("at") public static native @ByVal Tensor pow(@Const @ByRef Scalar self, @Const @ByRef Tensor exponent); +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? 
names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor pow_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar exponent); -// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor pow_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar exponent, @ByRef Tensor out); -// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor -@Namespace("at") public static native @ByVal Tensor pow(@Const @ByRef Tensor self, @Const @ByRef Scalar exponent); +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// Parsed from ATen/ops/prelu.h +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_symint_outf(@ByVal SymIntArrayRef size, @ByRef Tensor out); -// #include +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
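The value-returning randn overloads above are plain static methods on the generated entry class. A minimal usage sketch, assuming the usual bytedeco convention that they land on org.bytedeco.pytorch.global.torch, and that the varargs randn(long... size) overload (whose tail opens this hunk) is available:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class RandnSketch {
        public static void main(String[] args) {
            // Varargs overload: a 2x3 tensor of standard-normal values.
            Tensor a = randn(2, 3);
            System.out.println(a.dim() + " dims, size(0) = " + a.size(0)); // 2 dims, size(0) = 2
        }
    }
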
+@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// aten::prelu(Tensor self, Tensor weight) -> Tensor -@Namespace("at") public static native @ByVal Tensor prelu(@Const @ByRef Tensor self, @Const @ByRef Tensor weight); +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator); -// Parsed from ATen/ops/prod.h +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_symint_outf(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); -// #include +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal DimnameListOptional names); -// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self); +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_symint_outf(@ByVal SymIntArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); -// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor prod_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor prod(@Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor prod_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
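Throughout this file the generated _out/_outf pairs differ only in argument order: _out takes the destination tensor first, while _outf takes it last, mirroring the C++ at::*_outf signatures. Continuing the sketch above (same imports), and assuming the usual out= semantics of resizing and refilling the destination:

    Tensor out = randn(8);
    randn_out(out, 16);               // randn.out: destination first
    randn_outf(new long[]{16}, out);  // same op, destination last
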
+@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); -// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor prod_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor prod_outf(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); + +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); + + +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_symint_outf(@ByVal SymIntArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); -// Parsed from ATen/ops/promote_types.h + +// Parsed from ATen/ops/randn_like.h // #pragma once @@ -57688,16 +43326,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType -@Namespace("at") public static native ScalarType promote_types(ScalarType type1, ScalarType type2); +// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor randn_like(@Const @ByRef Tensor self); +// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randn_like(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); + +// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randn_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor randn_like_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor randn_like_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// Parsed from ATen/ops/put.h +// Parsed from ATen/ops/random.h // #pragma once @@ -57718,23 +43365,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor put(@Const @ByRef Tensor self, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Cast("bool") boolean accumulate/*=false*/); -@Namespace("at") public static native @ByVal Tensor put(@Const @ByRef Tensor self, @Const @ByRef Tensor index, @Const @ByRef Tensor source); +// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long from, @ByVal LongOptional to, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long from, @ByVal LongOptional to); +// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor random_outf(@Const @ByRef Tensor self, @Cast("int64_t") long from, @ByVal LongOptional to, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor put_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Cast("bool") boolean accumulate/*=false*/); -@Namespace("at") public static native @ByRef Tensor put_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor index, @Const @ByRef Tensor source); -// aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor put_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor index, @Const @ByRef Tensor source, @Cast("bool") boolean accumulate, @ByRef Tensor out); +// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self, @Cast("int64_t") long from, @ByVal LongOptional to, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self, @Cast("int64_t") long from, @ByVal LongOptional to); + +// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long to, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long to); +// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor random_outf(@Const @ByRef Tensor self, @Cast("int64_t") long to, @ByVal GeneratorOptional generator, @ByRef Tensor out); + +// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self, @Cast("int64_t") long to, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self, @Cast("int64_t") long to); +// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor random_outf(@Const @ByRef Tensor self, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::random(Tensor self, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self); -// Parsed from ATen/ops/q_per_channel_axis.h + + +// Parsed from ATen/ops/randperm.h // #pragma once @@ -57755,16 +43422,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::q_per_channel_axis(Tensor self) -> int -@Namespace("at") public static native @Cast("int64_t") long q_per_channel_axis(@Const @ByRef Tensor self); +// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n); +// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator); +// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor randperm_out(@ByRef Tensor out, @Cast("int64_t") long n); +// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randperm_outf(@Cast("int64_t") long n, @ByRef Tensor out); + +// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randperm_out(@ByRef Tensor out, @Cast("int64_t") long n, @ByVal GeneratorOptional generator); +// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor randperm_outf(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// Parsed from ATen/ops/q_per_channel_scales.h +// Parsed from ATen/ops/range.h // #pragma once @@ -57785,21 +43471,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::q_per_channel_scales(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor q_per_channel_scales(@Const @ByRef Tensor self); +// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar step, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor q_per_channel_scales_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor q_per_channel_scales_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor range_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end); +// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor range_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByRef Tensor out); +// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor range_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step); +// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor range_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByRef Tensor out); -// Parsed from ATen/ops/q_per_channel_zero_points.h + +// Parsed from ATen/ops/ravel.h // #pragma once @@ -57820,21 +43518,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::q_per_channel_zero_points(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor q_per_channel_zero_points(@Const @ByRef Tensor self); -// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor q_per_channel_zero_points_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor q_per_channel_zero_points_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::ravel(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor ravel(@Const @ByRef Tensor self); -// Parsed from ATen/ops/q_scale.h +// Parsed from ATen/ops/real.h // #pragma once @@ -57855,16 +43548,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::q_scale(Tensor self) -> float -@Namespace("at") public static native double q_scale(@Const @ByRef Tensor self); +// aten::real(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor real(@Const @ByRef Tensor self); -// Parsed from ATen/ops/q_zero_point.h +// Parsed from ATen/ops/reciprocal.h // #pragma once @@ -57885,16 +43578,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::q_zero_point(Tensor self) -> int -@Namespace("at") public static native @Cast("int64_t") long q_zero_point(@Const @ByRef Tensor self); +// aten::reciprocal(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor reciprocal(@Const @ByRef Tensor self); + +// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reciprocal_(@ByRef Tensor self); +// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reciprocal_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reciprocal_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/qr.h + +// Parsed from ATen/ops/record_stream.h // #pragma once @@ -57915,23 +43616,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) 
R) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor self, @Cast("bool") boolean some/*=true*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer qr_out(@ByRef Tensor Q, @ByRef Tensor R, @Const @ByRef Tensor self); -// aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer qr_outf(@Const @ByRef Tensor self, @Cast("bool") boolean some, @ByRef Tensor Q, @ByRef Tensor R); -// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) -@Namespace("at") public static native @ByVal TensorTensorTuple qr(@Const @ByRef Tensor self, @Cast("bool") boolean some/*=true*/); -@Namespace("at") public static native @ByVal TensorTensorTuple qr(@Const @ByRef Tensor self); -// Parsed from ATen/ops/qscheme.h +// Parsed from ATen/ops/refine_names.h // #pragma once @@ -57952,14 +43644,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// Parsed from ATen/ops/quantile.h +// Parsed from ATen/ops/reflection_pad1d.h // #pragma once @@ -57980,33 +43672,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor -@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); -@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, @Const @ByRef Tensor q); +// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); -@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor q); -// aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantile_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); -// aten::quantile.scalar(Tensor self, float q, int? 
dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor -@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); -@Namespace("at") public static native @ByVal Tensor quantile(@Const @ByRef Tensor self, double q); +// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); -// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::string_view(\"linear\")") @Cast("c10::string_view*") Pointer interpolation); -@Namespace("at") public static native @ByRef Tensor quantile_out(@ByRef Tensor out, @Const @ByRef Tensor self, double q); -// aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantile_outf(@Const @ByRef Tensor self, double q, @ByVal LongOptional dim, @Cast("bool") boolean keepdim, @ByVal @Cast("c10::string_view*") Pointer interpolation, @ByRef Tensor out); +// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); +// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor out); -// Parsed from ATen/ops/quantize_per_channel.h + +// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad1d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); + + +// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad1d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); + + + + + +// Parsed from ATen/ops/reflection_pad1d_backward.h // #pragma once @@ -58027,21 +43726,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor quantize_per_channel(@Const @ByRef Tensor self, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, ScalarType dtype); +// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantize_per_channel_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, ScalarType dtype); -// aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantize_per_channel_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, ScalarType dtype, @ByRef Tensor out); + +// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); +// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// Parsed from ATen/ops/quantize_per_tensor.h +// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
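reflection_pad1d takes {left, right} padding, each of which must be smaller than the input width; the varargs overload above makes this convenient to call from Java. Continuing the sketch:

    Tensor x = randn(1, 2, 4);                  // (N, C, W)
    Tensor padded = reflection_pad1d(x, 1, 1);  // W: 4 -> 6, borders mirrored
    System.out.println(padded.size(2));         // 6
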
+@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor grad_input); + + +// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); + + +// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad1d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); + + + + + +// Parsed from ATen/ops/reflection_pad2d.h // #pragma once @@ -58062,37 +43780,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor quantize_per_tensor(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, ScalarType dtype); +// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor quantize_per_tensor(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, ScalarType dtype); -// aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector quantize_per_tensor(@ByVal TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); +// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); -// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, ScalarType dtype); -// aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_outf(@Const @ByRef Tensor self, double scale, @Cast("int64_t") long zero_point, ScalarType dtype, @ByRef Tensor out); -// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, ScalarType dtype); -// aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor scale, @Const @ByRef Tensor zero_point, ScalarType dtype, @ByRef Tensor out); +// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void quantize_per_tensor_out(@ByVal TensorArrayRef out, @ByVal TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype); -// aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void quantize_per_tensor_outf(@ByVal TensorArrayRef tensors, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, ScalarType dtype, @ByVal TensorArrayRef out); + +// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor out); +// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad2d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); + + +// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad2d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); + -// Parsed from ATen/ops/quantize_per_tensor_dynamic.h + + +// Parsed from ATen/ops/reflection_pad2d_backward.h // #pragma once @@ -58113,21 +43834,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor -@Namespace("at") public static native @ByVal Tensor quantize_per_tensor_dynamic(@Const @ByRef Tensor self, ScalarType dtype, @Cast("bool") boolean reduce_range); +// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_dynamic_out(@ByRef Tensor out, @Const @ByRef Tensor self, ScalarType dtype, @Cast("bool") boolean reduce_range); -// aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantize_per_tensor_dynamic_outf(@Const @ByRef Tensor self, ScalarType dtype, @Cast("bool") boolean reduce_range, @ByRef Tensor out); + +// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); +// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// Parsed from ATen/ops/quantized_batch_norm.h +// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor grad_input); + + +// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); + + +// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad2d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); + + + + + +// Parsed from ATen/ops/reflection_pad3d.h // #pragma once @@ -58148,21 +43888,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? 
bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor -@Namespace("at") public static native @ByVal Tensor quantized_batch_norm(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor var, double eps, double output_scale, @Cast("int64_t") long output_zero_point); +// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantized_batch_norm_out(@ByRef Tensor out, @Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor var, double eps, double output_scale, @Cast("int64_t") long output_zero_point); -// aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantized_batch_norm_outf(@Const @ByRef Tensor input, @Const @ByRef TensorOptional weight, @Const @ByRef TensorOptional bias, @Const @ByRef Tensor mean, @Const @ByRef Tensor var, double eps, double output_scale, @Cast("int64_t") long output_zero_point, @ByRef Tensor out); + +// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// Parsed from ATen/ops/quantized_gru_cell.h +// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor out); + + +// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad3d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); + + +// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad3d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); + + + + + +// Parsed from ATen/ops/reflection_pad3d_backward.h // #pragma once @@ -58183,16 +43942,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor -@Namespace("at") public static native @ByVal Tensor quantized_gru_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef Tensor b_ih, @Const @ByRef Tensor b_hh, @Const @ByRef Tensor packed_ih, @Const @ByRef Tensor packed_hh, @Const @ByRef Tensor col_offsets_ih, @Const @ByRef Tensor col_offsets_hh, @Const @ByRef Scalar scale_ih, @Const @ByRef Scalar scale_hh, @Const @ByRef Scalar zero_point_ih, @Const @ByRef Scalar zero_point_hh); +// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); + +// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); + + +// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); + + +// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor grad_input); + + +// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor reflection_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); + + +// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor reflection_pad3d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// Parsed from ATen/ops/quantized_lstm_cell.h + + +// Parsed from ATen/ops/relu.h // #pragma once @@ -58213,16 +43996,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple quantized_lstm_cell(@Const @ByRef Tensor input, @ByVal TensorArrayRef hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef Tensor b_ih, @Const @ByRef Tensor b_hh, @Const @ByRef Tensor packed_ih, @Const @ByRef Tensor packed_hh, @Const @ByRef Tensor col_offsets_ih, @Const @ByRef Tensor col_offsets_hh, @Const @ByRef Scalar scale_ih, @Const @ByRef Scalar scale_hh, @Const @ByRef Scalar zero_point_ih, @Const @ByRef Scalar zero_point_hh); +// aten::relu(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor relu(@Const @ByRef Tensor self); +// aten::relu_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor relu_(@ByRef Tensor self); + +// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor relu_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor relu_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/quantized_max_pool1d.h + +// Parsed from ATen/ops/relu6.h // #pragma once @@ -58243,28 +44034,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::relu6(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor relu6(@Const @ByRef Tensor self); -// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +// aten::relu6_(Tensor(a!) self) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor relu6_(@ByRef Tensor self); -// Parsed from ATen/ops/quantized_max_pool2d.h +// Parsed from ATen/ops/remainder.h // #pragma once @@ -58285,28 +44067,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByVal Tensor quantized_max_pool2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor remainder_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor remainder_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode/*=false*/); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor quantized_max_pool2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @Cast("bool") boolean ceil_mode, @ByRef Tensor out); +// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor remainder(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor remainder_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor remainder_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor remainder(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor remainder(@Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor remainder_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor remainder_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/quantized_rnn_relu_cell.h + + + +// Parsed from ATen/ops/rename.h // #pragma once @@ -58327,16 +44118,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor -@Namespace("at") public static native @ByVal Tensor quantized_rnn_relu_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef Tensor b_ih, @Const @ByRef Tensor b_hh, @Const @ByRef Tensor packed_ih, @Const @ByRef Tensor packed_hh, @Const @ByRef Tensor col_offsets_ih, @Const @ByRef Tensor col_offsets_hh, @Const @ByRef Scalar scale_ih, @Const @ByRef Scalar scale_hh, @Const @ByRef Scalar zero_point_ih, @Const @ByRef Scalar zero_point_hh); -// Parsed from ATen/ops/quantized_rnn_tanh_cell.h +// Parsed from ATen/ops/renorm.h // #pragma once @@ -58357,16 +44146,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor -@Namespace("at") public static native @ByVal Tensor quantized_rnn_tanh_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef Tensor b_ih, @Const @ByRef Tensor b_hh, @Const @ByRef Tensor packed_ih, @Const @ByRef Tensor packed_hh, @Const @ByRef Tensor col_offsets_ih, @Const @ByRef Tensor col_offsets_hh, @Const @ByRef Scalar scale_ih, @Const @ByRef Scalar scale_hh, @Const @ByRef Scalar zero_point_ih, @Const @ByRef Scalar zero_point_hh); +// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor renorm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar p, @Cast("int64_t") long dim, @Const @ByRef Scalar maxnorm); +// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor renorm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar p, @Cast("int64_t") long dim, @Const @ByRef Scalar maxnorm, @ByRef Tensor out); + +// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor +@Namespace("at") public static native @ByVal Tensor renorm(@Const @ByRef Tensor self, @Const @ByRef Scalar p, @Cast("int64_t") long dim, @Const @ByRef Scalar maxnorm); -// Parsed from ATen/ops/rad2deg.h +// Parsed from ATen/ops/repeat.h // #pragma once @@ -58387,24 +44181,32 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rad2deg(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor rad2deg(@Const @ByRef Tensor self); -// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rad2deg_(@ByRef Tensor self); +// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor repeat_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef repeats); +@Namespace("at") public static native @ByRef Tensor repeat_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... repeats); -// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rad2deg_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rad2deg_outf(@Const @ByRef Tensor self, @ByRef Tensor out); + +// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor repeat_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef repeats, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor repeat_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] repeats, @ByRef Tensor out); +// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor repeat_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef repeats); + + +// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor repeat_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef repeats, @ByRef Tensor out); + -// Parsed from ATen/ops/rand.h + + +// Parsed from ATen/ops/repeat_interleave.h // #pragma once @@ -58425,169 +44227,145 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); - - -// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - - -// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal DimnameListOptional names); - - -// aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - - -// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); - +// #include -// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); +@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats); -// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); +@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Const @ByRef Tensor repeats); +// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? 
output_size=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Cast("int64_t") long repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); +@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Cast("int64_t") long repeats); -// aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor self, @ByVal SymInt repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); +@Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor self, @ByVal SymInt repeats); -// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor repeat_interleave_out(@ByRef Tensor out, @Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); +@Namespace("at") public static native @ByRef Tensor repeat_interleave_out(@ByRef Tensor out, @Const @ByRef Tensor repeats); +// aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor repeat_interleave_outf(@Const @ByRef Tensor repeats, @ByVal LongOptional output_size, @ByRef Tensor out); -// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size); +// Parsed from ATen/ops/replication_pad1d.h -// aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// #pragma once +// @generated by torchgen/gen.py from Function.h -// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// #include -// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator); +// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor replication_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad1d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor replication_pad1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); -// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad1d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor out); -// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_symint_out(@ByRef Tensor out, @ByVal SymIntRef size); +// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor replication_pad1d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor replication_pad1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
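
Tying together the renorm, repeat.out, and repeat_interleave entries above, a sketch under the usual assumptions (static import of org.bytedeco.pytorch.global.torch; factories, shapes, and class name illustrative):

import org.bytedeco.pytorch.Scalar;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class RenormRepeatSketch {
    public static void main(String[] args) {
        Tensor x = randn(3, 4);

        // renorm: rescale each sub-tensor along dim 0 whose 2-norm exceeds 1.0.
        Tensor clipped = renorm(x, new Scalar(2.0), 0, new Scalar(1.0));

        // repeat.out: tile x twice along each dimension into a preallocated tensor.
        Tensor tiled = zeros(6, 8);           // (3, 4) tiled (2, 2) -> (6, 8)
        repeat_out(tiled, x, 2, 2);

        // repeat_interleave.self_int: repeat every element; with dim omitted
        // the result is flattened.
        Tensor r = repeat_interleave(x, 2);   // 24 elements
    }
}
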
-@Namespace("at") public static native @ByRef Tensor rand_symint_outf(@ByVal SymIntRef size, @ByRef Tensor out); +// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor replication_pad1d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_symint_out(@ByRef Tensor out, @ByVal SymIntRef size, @ByVal GeneratorOptional generator); +// Parsed from ATen/ops/replication_pad1d_backward.h +// #pragma once -// aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_symint_outf(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// @generated by torchgen/gen.py from Function.h +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); +// #include -// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_symint_out(@ByRef Tensor out, @ByVal SymIntRef size, @ByVal DimnameListOptional names); +// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::rand.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_symint_outf(@ByVal SymIntRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); +// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); -// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor rand_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor rand_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); +// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor grad_input); -// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor rand_symint_out(@ByRef Tensor out, @ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor replication_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor replication_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_symint_outf(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out); +// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor replication_pad1d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// Parsed from ATen/ops/rand_like.h +// Parsed from ATen/ops/replication_pad2d.h // #pragma once @@ -58608,25 +44386,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor rand_like(@Const @ByRef Tensor self); -// aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rand_like(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); +// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor replication_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rand_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor rand_like_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor rand_like_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); + +// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor replication_pad2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// Parsed from ATen/ops/randint.h +// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor out); + + +// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor replication_pad2d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor replication_pad2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); + + +// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor replication_pad2d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); + + + + + +// Parsed from ATen/ops/replication_pad2d_backward.h // #pragma once @@ -58647,169 +44440,148 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); - - -// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntRef size); +// #include -// aten::randint(int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
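
For replication_pad2d above, note the asymmetry the generator produces between _out and _outf: varargs long... is only possible when padding is the final parameter, so the _outf overload takes a plain long[]. A sketch, assumptions as before:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ReplicationPad2dSketch {
    public static void main(String[] args) {
        // 4-D input (N, C, H, W); padding order is (left, right, top, bottom).
        Tensor x = randn(1, 3, 8, 8);
        Tensor padded = replication_pad2d(x, 1, 1, 2, 2);   // -> (1, 3, 12, 10)

        // _outf puts the destination last, so padding cannot be varargs there.
        Tensor out = zeros(1, 3, 12, 10);
        replication_pad2d_outf(x, new long[]{1, 1, 2, 2}, out);
    }
}
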
+@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); -// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding); -// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal GeneratorOptional generator); +// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor grad_input); -// aten::randint.generator(int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor replication_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor replication_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding);

-// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
+// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor
+@Namespace("at") public static native @ByVal Tensor replication_pad2d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding);
-// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntRef size);
+// Parsed from ATen/ops/replication_pad3d.h
-// aten::randint.low(int low, int high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// #pragma once
+// @generated by torchgen/gen.py from Function.h
-// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator);
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator);
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
-// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-@Namespace("at") public static native @ByVal Tensor randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// #include
-// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal GeneratorOptional generator);
+// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef padding);
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding);
-// aten::randint.low_generator(int low, int high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint_symint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out);
-// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
-@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
+// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding);
-// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out);
+// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor out);
-// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal SymIntRef size);
+// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
+@Namespace("at") public static native @ByVal Tensor replication_pad3d(@Const @ByRef Tensor self, @ByVal LongArrayRef padding);
+@Namespace("at") public static native @ByVal Tensor replication_pad3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding);
-// aten::randint.out(int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long high, @ByVal SymIntRef size, @ByRef Tensor out);
+// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor
+@Namespace("at") public static native @ByVal Tensor replication_pad3d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef padding);
-// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator);
-@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator);
-// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out);
-// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal GeneratorOptional generator);
+// Parsed from ATen/ops/replication_pad3d_backward.h
+// #pragma once
-// aten::randint.generator_out(int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out);
+// @generated by torchgen/gen.py from Function.h
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
-// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
-@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
-// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out);
+// #include
-// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntRef size);
+// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding);
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding);
-// aten::randint.low_out(int low, int high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntRef size, @ByRef Tensor out);
+// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding, @ByRef Tensor grad_input);
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input);
-// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator);
-@Namespace("at") public static native @ByRef Tensor randint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator);
+// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding);
-// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor randint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out);
+// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding, @ByRef Tensor grad_input);
-// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_symint_out(@ByRef Tensor out, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal GeneratorOptional generator);
+// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
+@Namespace("at") public static native @ByVal Tensor replication_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal LongArrayRef padding);
+@Namespace("at") public static native @ByVal Tensor replication_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding);
-// aten::randint.low_generator_out(int low, int high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_symint_outf(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out);
+// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor
+@Namespace("at") public static native @ByVal Tensor replication_pad3d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntArrayRef padding);
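Reviewer note (not part of the generated sources): the replication-pad bindings above replace the old @Cast-wrapped LongArrayRef/SymIntRef parameters with plain LongArrayRef, long... varargs, and SymIntArrayRef. A minimal sketch of how the new varargs overload is called, assuming the usual org.bytedeco.pytorch setup with the native library loaded:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ReplicationPad3dSketch {
        public static void main(String[] args) {
            // 5-D input (N, C, D, H, W); six pad values: left, right, top, bottom, front, back
            Tensor x = randn(1, 1, 2, 2, 2);
            Tensor y = replication_pad3d(x, 1, 1, 1, 1, 1, 1); // long... overload from this patch
            System.out.println(y.size(2)); // depth grows from 2 to 4
        }
    }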
-// Parsed from ATen/ops/randint_like.h
+// Parsed from ATen/ops/requires_grad.h
 // #pragma once
@@ -58830,37 +44602,14 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
-
-
-// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high);
-// aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format);
-
-// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high);
-// aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format);
+// #include
-// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long high);
-// aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_like_outf(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out);
-// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByRef Tensor randint_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high);
-// aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randint_like_outf(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out);
-// Parsed from ATen/ops/randn.h
+// Parsed from ATen/ops/reshape.h
 // #pragma once
@@ -58881,169 +44630,190 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
+// #include
-// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
+// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
+@Namespace("at") public static native @ByVal Tensor reshape(@Const @ByRef Tensor self, @ByVal LongArrayRef shape);
+@Namespace("at") public static native @ByVal Tensor reshape(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape);
-// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
+@Namespace("at") public static native @ByVal Tensor reshape_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef shape);
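The new reshape bindings follow the same pattern: one overload per aten signature taking LongArrayRef, one taking long... varargs, plus a _symint variant. A quick, hypothetical sketch (same assumptions as the previous note):

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ReshapeSketch {
        public static void main(String[] args) {
            Tensor x = randn(2, 3);
            Tensor y = reshape(x, 3, 2);   // plain long... shape
            Tensor f = reshape(x, -1);     // -1 infers the dimension (flatten)
            System.out.println(f.size(0)); // 6
        }
    }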
-// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size);
-// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// Parsed from ATen/ops/reshape_as.h
-// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator);
+// #pragma once
+// @generated by torchgen/gen.py from Function.h
-// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
-// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator);
+// #include
-// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names);
-// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// Parsed from ATen/ops/resize.h
-// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal DimnameListOptional names);
+// #pragma once
+// @generated by torchgen/gen.py from Function.h
-// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
-// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names);
+// #include
-// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-@Namespace("at") public static native @ByVal Tensor randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names);
+// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
+@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size);
+@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
+@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
-// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn_symint(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @Const @ByRef Tensor resize_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out);
+@Namespace("at") public static native @Const @ByRef Tensor resize_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out);
-// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
-@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
+// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @Const @ByRef Tensor resize_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
+@Namespace("at") public static native @Const @ByRef Tensor resize_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size);
-// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out);
+// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @Const @ByRef Tensor resize_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out);
-// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_symint_out(@ByRef Tensor out, @ByVal SymIntRef size);
+// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
+@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal LongArrayRef size);
+@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
+@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
-// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_symint_outf(@ByVal SymIntRef size, @ByRef Tensor out);
+// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor resize_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
+@Namespace("at") public static native @ByVal Tensor resize_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size);
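A hypothetical sketch of the functional resize binding above (not part of the patch; note that, as I understand the aten semantics, elements beyond the original data are left uninitialized):

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ResizeSketch {
        public static void main(String[] args) {
            Tensor x = ones(2, 3);
            // Functional variant: returns a tensor of the requested size rather
            // than resizing in place; new elements hold arbitrary values.
            Tensor y = resize(x, 4, 4);
            System.out.println(y.size(0)); // 4
        }
    }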
-// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator);
-@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator);
+// Parsed from ATen/ops/resize_as.h
-// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByRef Tensor out);
+// #pragma once
+// @generated by torchgen/gen.py from Function.h
-// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_symint_out(@ByRef Tensor out, @ByVal SymIntRef size, @ByVal GeneratorOptional generator);
-// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_symint_outf(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByRef Tensor out);
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
-// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names);
-@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names);
+// #include
-// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out);
+// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+@Namespace("at") public static native @Const @ByRef Tensor resize_as_(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
+@Namespace("at") public static native @Const @ByRef Tensor resize_as_(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template);
-// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_symint_out(@ByRef Tensor out, @ByVal SymIntRef size, @ByVal DimnameListOptional names);
+// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @Const @ByRef Tensor resize_as_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor the_template, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
+@Namespace("at") public static native @Const @ByRef Tensor resize_as_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor the_template);
+// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @Const @ByRef Tensor resize_as_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out);
+// aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor resize_as(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
+@Namespace("at") public static native @ByVal Tensor resize_as(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template);
-// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_symint_outf(@ByVal SymIntRef size, @ByVal DimnameListOptional names, @ByRef Tensor out);
-// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names);
-@Namespace("at") public static native @ByRef Tensor randn_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names);
+// Parsed from ATen/ops/resize_as_sparse.h
-// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor randn_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out);
+// #pragma once
+// @generated by torchgen/gen.py from Function.h
-// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_symint_out(@ByRef Tensor out, @ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names);
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
-// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_symint_outf(@ByVal SymIntRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByRef Tensor out);
+// #include
+// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
+@Namespace("at") public static native @Const @ByRef Tensor resize_as_sparse_(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template);
+
+// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @Const @ByRef Tensor resize_as_sparse_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor the_template);
+// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @Const @ByRef Tensor resize_as_sparse_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template, @Const @ByRef Tensor out);
+
+// aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor
+@Namespace("at") public static native @ByVal Tensor resize_as_sparse(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template);
-// Parsed from ATen/ops/randn_like.h
+
+
+// Parsed from ATen/ops/resolve_conj.h
 // #pragma once
@@ -59064,25 +44834,16 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
-
+// #include
-// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByVal Tensor randn_like(@Const @ByRef Tensor self);
-// aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randn_like(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format);
-// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format);
-@Namespace("at") public static native @ByRef Tensor randn_like_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randn_like_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out);
+// aten::resolve_conj(Tensor(a) self) -> Tensor(a)
+@Namespace("at") public static native @ByVal Tensor resolve_conj(@Const @ByRef Tensor self);
-// Parsed from ATen/ops/random.h
+// Parsed from ATen/ops/resolve_neg.h
 // #pragma once
@@ -59103,43 +44864,16 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
-
-
-// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long from, @ByVal LongOptional to, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator);
-@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long from, @ByVal LongOptional to);
-// aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor random_outf(@Const @ByRef Tensor self, @Cast("int64_t") long from, @ByVal LongOptional to, @ByVal GeneratorOptional generator, @ByRef Tensor out);
-
-// aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self, @Cast("int64_t") long from, @ByVal LongOptional to, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator);
-@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self, @Cast("int64_t") long from, @ByVal LongOptional to);
-
-// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long to, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator);
-@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long to);
-// aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor random_outf(@Const @ByRef Tensor self, @Cast("int64_t") long to, @ByVal GeneratorOptional generator, @ByRef Tensor out);
-
-// aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self, @Cast("int64_t") long to, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator);
-@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self, @Cast("int64_t") long to);
+// #include
-// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator);
-@Namespace("at") public static native @ByRef Tensor random_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor random_outf(@Const @ByRef Tensor self, @ByVal GeneratorOptional generator, @ByRef Tensor out);
-// aten::random(Tensor self, *, Generator? generator=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator);
-@Namespace("at") public static native @ByVal Tensor random(@Const @ByRef Tensor self);
+// aten::resolve_neg(Tensor(a) self) -> Tensor(a)
+@Namespace("at") public static native @ByVal Tensor resolve_neg(@Const @ByRef Tensor self);
-// Parsed from ATen/ops/randperm.h
+// Parsed from ATen/ops/result_type.h
 // #pragma once
@@ -59160,35 +44894,25 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
+// #include
-// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n);
-// aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType
+@Namespace("at") public static native ScalarType result_type(@Const @ByRef Tensor tensor, @Const @ByRef Tensor other);
-// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator);
-// aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType
+@Namespace("at") public static native ScalarType result_type(@Const @ByRef Tensor tensor, @Const @ByRef Scalar other);
-// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randperm_out(@ByRef Tensor out, @Cast("int64_t") long n);
-// aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randperm_outf(@Cast("int64_t") long n, @ByRef Tensor out);
+// aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType
+@Namespace("at") public static native ScalarType result_type(@Const @ByRef Scalar scalar, @Const @ByRef Tensor tensor);
-// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randperm_out(@ByRef Tensor out, @Cast("int64_t") long n, @ByVal GeneratorOptional generator);
-// aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor randperm_outf(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByRef Tensor out);
+// aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType
+@Namespace("at") public static native ScalarType result_type(@Const @ByRef Scalar scalar1, @Const @ByRef Scalar scalar2);
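The four aten result_type overloads above collapse onto one overloaded Java method returning the ScalarType enum. A small promotion sketch; the Tensor.to(ScalarType) convenience overload used here is an assumption, not something introduced by this patch:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ResultTypeSketch {
        public static void main(String[] args) {
            Tensor f = ones(2);                       // default dtype: Float
            Tensor d = ones(2).to(ScalarType.Double); // assumed dtype-cast overload
            ScalarType promoted = result_type(f, d);
            System.out.println(promoted);             // Double
        }
    }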
-// Parsed from ATen/ops/range.h
+// Parsed from ATen/ops/retain_grad.h
 // #pragma once
@@ -59209,33 +44933,14 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
-
-
-// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar step, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-// aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-
-// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options);
-// aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
-
-// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor range_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end);
-// aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor range_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByRef Tensor out);
-
-// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor range_out(@ByRef Tensor out, @Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step);
-// aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor range_outf(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByRef Tensor out);
+// #include
-// Parsed from ATen/ops/ravel.h
+
+
+// Parsed from ATen/ops/retains_grad.h
 // #pragma once
@@ -59256,16 +44961,14 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
+// #include
-
-// aten::ravel(Tensor(a) self) -> Tensor(a)
-@Namespace("at") public static native @ByVal Tensor ravel(@Const @ByRef Tensor self);
-// Parsed from ATen/ops/real.h
+// Parsed from ATen/ops/rnn_relu.h
 // #pragma once
@@ -59286,16 +44989,19 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
+// #include
-// aten::real(Tensor(a) self) -> Tensor(a)
-@Namespace("at") public static native @ByVal Tensor real(@Const @ByRef Tensor self);
+// aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_relu(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first);
+
+// aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_relu(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional);
-// Parsed from ATen/ops/reciprocal.h
+// Parsed from ATen/ops/rnn_relu_cell.h
 // #pragma once
@@ -59316,24 +45022,17 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
-
-
-// aten::reciprocal(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor reciprocal(@Const @ByRef Tensor self);
+// #include
-// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor reciprocal_(@ByRef Tensor self);
-// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor reciprocal_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor reciprocal_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
+// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor rnn_relu_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_ih, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_hh);
+@Namespace("at") public static native @ByVal Tensor rnn_relu_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh);
-// Parsed from ATen/ops/record_stream.h
+// Parsed from ATen/ops/rnn_tanh.h
 // #pragma once
@@ -59354,14 +45053,19 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
+// #include
+
+// aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_tanh(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first);
+
+// aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensor_T rnn_tanh(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal @Cast("at::TensorList*") TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional);
-// Parsed from ATen/ops/refine_names.h
+// Parsed from ATen/ops/rnn_tanh_cell.h
 // #pragma once
@@ -59382,14 +45086,17 @@ scalar_t sf(scalar_t x, scalar_t y)
-// #include
+// #include
+
+// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor rnn_tanh_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_ih, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_hh);
+@Namespace("at") public static native @ByVal Tensor rnn_tanh_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh);
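Because the optional biases map to TensorOptional defaults, the single-step RNN cells also get a plain four-argument overload. A hypothetical sketch with made-up shapes (batch 3, input size 10, hidden size 20):

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class RnnCellSketch {
        public static void main(String[] args) {
            Tensor input = randn(3, 10);  // (batch, input_size)
            Tensor hx    = zeros(3, 20);  // (batch, hidden_size)
            Tensor wIh   = randn(20, 10); // (hidden_size, input_size)
            Tensor wHh   = randn(20, 20); // (hidden_size, hidden_size)
            Tensor h1 = rnn_tanh_cell(input, hx, wIh, wHh); // bias-free overload
            System.out.println(h1.size(1)); // 20
        }
    }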
-@Namespace("at") public static native @ByRef Tensor reflection_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor +@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); +@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal LongArrayRef shifts); +@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shifts); +// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef dims); +@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef shifts); +@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shifts); +// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor roll_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef shifts, @ByVal LongArrayRef dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor roll_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); -// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); -// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// Parsed from ATen/ops/rot90.h -// aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor reflection_pad1d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor out); +// #pragma once +// @generated by torchgen/gen.py from Function.h -// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad1d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef padding); +// #include + + +// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor +@Namespace("at") public static native @ByVal Tensor rot90(@Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") LongArrayRef dims); +@Namespace("at") public static native @ByVal Tensor rot90(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor rot90(@Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rot90_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") LongArrayRef dims); +@Namespace("at") public static native @ByRef Tensor rot90_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor rot90_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rot90_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal LongArrayRef dims, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor rot90_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); -// Parsed from ATen/ops/reflection_pad1d_backward.h + +// Parsed from ATen/ops/round.h // #pragma once @@ -59464,40 +45199,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); - - -// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); - - -// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// #include -// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad1d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor grad_input); +// aten::round(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor round(@Const @ByRef Tensor self); +// aten::round_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor round_(@ByRef Tensor self); -// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor round_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor round_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::round.decimals(Tensor self, *, int decimals) -> Tensor +@Namespace("at") public static native @ByVal Tensor round(@Const @ByRef Tensor self, @Cast("int64_t") long decimals); -// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad1d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor round_(@ByRef Tensor self, @Cast("int64_t") long decimals); +// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor round_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long decimals); +// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!) 
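The rot90 and round.decimals bindings above follow the same varargs pattern; a small hedged sketch (again assuming the org.bytedeco.pytorch.global.torch statics; the class name and values are illustrative):

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class Rot90RoundSketch {
    public static void main(String[] args) {
        Tensor t = rand(2, 2);
        Tensor q = rot90(t);          // defaults: k=1, dims=[0,1]
        Tensor h = rot90(t, 2, 0, 1); // explicit k, dims via the long/varargs overload
        Tensor r = round(t, 2);       // aten::round.decimals: round to 2 decimal places
    }
}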
+@Namespace("at") public static native @ByRef Tensor round_outf(@Const @ByRef Tensor self, @Cast("int64_t") long decimals, @ByRef Tensor out); -// Parsed from ATen/ops/reflection_pad2d.h +// Parsed from ATen/ops/row_indices.h // #pragma once @@ -59518,40 +45248,49 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); -// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// Parsed from ATen/ops/row_indices_copy.h +// #pragma once -// aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor out); +// @generated by torchgen/gen.py from Function.h +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef padding); +// #include +// aten::row_indices_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor row_indices_copy(@Const @ByRef Tensor self); + +// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor row_indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor row_indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/reflection_pad2d_backward.h + +// Parsed from ATen/ops/row_stack.h // #pragma once @@ -59572,40 +45311,56 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) 
grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::row_stack(Tensor[] tensors) -> Tensor +@Namespace("at") public static native @ByVal Tensor row_stack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor row_stack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor row_stack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); -// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); -// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// Parsed from ATen/ops/rrelu.h -// aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor grad_input); +// #pragma once +// @generated by torchgen/gen.py from Function.h -// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad2d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); + +// #include +// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rrelu(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.125)") Scalar lower, @Const @ByRef(nullValue = "at::Scalar(0.3333333333333333)") Scalar upper, @Cast("bool") boolean training/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor rrelu(@Const @ByRef Tensor self); +// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rrelu_(@ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.125)") Scalar lower, @Const @ByRef(nullValue = "at::Scalar(0.3333333333333333)") Scalar upper, @Cast("bool") boolean training/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor rrelu_(@ByRef Tensor self); -// Parsed from ATen/ops/reflection_pad3d.h + + +// Parsed from ATen/ops/rrelu_with_noise.h // #pragma once @@ -59626,40 +45381,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); - - -// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); - - -// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef padding); - - -// aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) 
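For the rrelu bindings just above: with the default training=false, the randomized slope is replaced by its expectation (lower+upper)/2, so the short overloads behave deterministically. A minimal sketch under the same assumptions as the earlier examples:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class RreluSketch {
    public static void main(String[] args) {
        Tensor x = randn(4);
        Tensor y = rrelu(x);   // defaults: lower=1/8, upper=1/3, training=false
        rrelu_(x);             // in-place variant with the same defaults
    }
}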
-@Namespace("at") public static native @ByRef Tensor reflection_pad3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor out); - +// #include -// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef(nullValue = "at::Scalar(0.125)") Scalar lower, @Const @ByRef(nullValue = "at::Scalar(0.3333333333333333)") Scalar upper, @Cast("bool") boolean training/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor noise); +// aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef Scalar lower, @Const @ByRef Scalar upper, @Cast("bool") boolean training, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad3d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor rrelu_with_noise(@Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef(nullValue = "at::Scalar(0.125)") Scalar lower, @Const @ByRef(nullValue = "at::Scalar(0.3333333333333333)") Scalar upper, @Cast("bool") boolean training/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor rrelu_with_noise(@Const @ByRef Tensor self, @Const @ByRef Tensor noise); +// aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) 
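The rrelu_with_noise variant declared above additionally records the sampled slopes in the caller-provided noise tensor (only meaningful with training=true). A hedged sketch; it assumes the usual empty_like convenience overload is generated alongside these functions:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class RreluWithNoiseSketch {
    public static void main(String[] args) {
        Tensor x = randn(4);
        Tensor noise = empty_like(x);          // buffer for the sampled slopes (assumed overload)
        Tensor y = rrelu_with_noise(x, noise); // defaults: lower=1/8, upper=1/3, training=false
    }
}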
+@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_(@ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef(nullValue = "at::Scalar(0.125)") Scalar lower, @Const @ByRef(nullValue = "at::Scalar(0.3333333333333333)") Scalar upper, @Cast("bool") boolean training/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_(@ByRef Tensor self, @Const @ByRef Tensor noise); -// Parsed from ATen/ops/reflection_pad3d_backward.h +// Parsed from ATen/ops/rrelu_with_noise_backward.h // #pragma once @@ -59680,40 +45422,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); - - -// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); - - -// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); - - -// aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor reflection_pad3d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor grad_input); - - -// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor reflection_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +// #include -// aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor reflection_pad3d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor +@Namespace("at") public static native @ByVal Tensor rrelu_with_noise_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef Scalar lower, @Const @ByRef Scalar upper, @Cast("bool") boolean training, @Cast("bool") boolean self_is_result); +// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef Scalar lower, @Const @ByRef Scalar upper, @Cast("bool") boolean training, @Cast("bool") boolean self_is_result); +// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef Scalar lower, @Const @ByRef Scalar upper, @Cast("bool") boolean training, @Cast("bool") boolean self_is_result, @ByRef Tensor out); -// Parsed from ATen/ops/relu.h +// Parsed from ATen/ops/rshift.h // #pragma once @@ -59734,24 +45457,29 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::relu(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor relu(@Const @ByRef Tensor self); +// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor __rshift__(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::relu_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor relu_(@ByRef Tensor self); +// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor __rshift__(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor relu_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor relu_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor __rshift___out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor __rshift___outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
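The dunder-style __rshift__ names above are legal Java identifiers, so the elementwise shift operators are called directly. A sketch (assumes arange with integral Scalar endpoints yields an int64 tensor, as in PyTorch's dtype inference):

import org.bytedeco.pytorch.Scalar;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class RshiftSketch {
    public static void main(String[] args) {
        Tensor x = arange(new Scalar(1), new Scalar(9)); // int64 tensor [1..8]
        Tensor y = __rshift__(x, new Scalar(1));         // elementwise x >> 1
    }
}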
+@Namespace("at") public static native @ByRef Tensor __rshift___out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor __rshift___outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/relu6.h + +// Parsed from ATen/ops/rsqrt.h // #pragma once @@ -59772,19 +45500,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::relu6(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor relu6(@Const @ByRef Tensor self); +// aten::rsqrt(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor rsqrt(@Const @ByRef Tensor self); -// aten::relu6_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor relu6_(@ByRef Tensor self); +// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rsqrt_(@ByRef Tensor self); + +// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rsqrt_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rsqrt_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/remainder.h +// Parsed from ATen/ops/rsub.h // #pragma once @@ -59805,37 +45538,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor remainder_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor remainder_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// #include -// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor remainder(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor remainder_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
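Note the torchgen out-variant convention visible throughout these hunks: the _out form takes the destination first, the _outf form takes it last. A small rsqrt sketch under the same assumptions as the earlier examples:

import org.bytedeco.pytorch.Scalar;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class RsqrtSketch {
    public static void main(String[] args) {
        Tensor x = rand(3).add(new Scalar(1.0)); // keep values strictly positive
        Tensor y = rsqrt(x);                     // 1 / sqrt(x)
        Tensor out = empty(3);
        rsqrt_out(out, x);                       // writes into the preallocated tensor
    }
}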
-@Namespace("at") public static native @ByRef Tensor remainder_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor rsub(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor rsub(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor remainder(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor rsub(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor rsub(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor remainder(@Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rsub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor rsub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rsub_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor remainder_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor remainder_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rsub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor rsub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor rsub_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// Parsed from ATen/ops/rename.h +// Parsed from ATen/ops/scalar_tensor.h // #pragma once @@ -59856,14 +45585,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor scalar_tensor(@Const @ByRef Scalar s, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor scalar_tensor(@Const @ByRef Scalar s); +// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor scalar_tensor(@Const @ByRef Scalar s, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scalar_tensor_out(@ByRef Tensor out, @Const @ByRef Scalar s); +// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scalar_tensor_outf(@Const @ByRef Scalar s, @ByRef Tensor out); -// Parsed from ATen/ops/renorm.h +// Parsed from ATen/ops/scaled_dot_product_attention.h // #pragma once @@ -59884,21 +45623,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor renorm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar p, @Cast("int64_t") long dim, @Const @ByRef Scalar maxnorm); -// aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor renorm_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar p, @Cast("int64_t") long dim, @Const @ByRef Scalar maxnorm, @ByRef Tensor out); -// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor -@Namespace("at") public static native @ByVal Tensor renorm(@Const @ByRef Tensor self, @Const @ByRef Scalar p, @Cast("int64_t") long dim, @Const @ByRef Scalar maxnorm); +// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor scaled_dot_product_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional attn_mask, double dropout_p/*=0.0*/, @Cast("bool") boolean is_causal/*=false*/); +@Namespace("at") public static native @ByVal Tensor scaled_dot_product_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value); -// Parsed from ATen/ops/repeat.h +// Parsed from ATen/ops/scatter.h // #pragma once @@ -59919,32 +45654,51 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor +@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); -// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor repeat_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef repeats); -@Namespace("at") public static native @ByRef Tensor repeat_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
repeats); +// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); +// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByRef Tensor out); +// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor +@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); -// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor repeat_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef repeats, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor repeat_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] repeats, @ByRef Tensor out); +// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); +// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByRef Tensor out); +// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor +@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); -// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor repeat_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef repeats); +// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); +// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) 
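The three-argument scaled_dot_product_attention overload above leaves attn_mask, dropout_p, and is_causal at their defaults. An illustrative sketch (shapes invented; statics assumed in org.bytedeco.pytorch.global.torch):

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class SdpaSketch {
    public static void main(String[] args) {
        Tensor q = randn(1, 4, 8); // (batch, seq_len, embed_dim), illustrative
        Tensor k = randn(1, 4, 8);
        Tensor v = randn(1, 4, 8);
        Tensor out = scaled_dot_product_attention(q, k, v); // attn_mask=None, dropout_p=0.0, is_causal=false
    }
}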
+@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @ByRef Tensor out); +// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor +@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce); -// aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor repeat_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef repeats, @ByRef Tensor out); +// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce); +// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce, @ByRef Tensor out); + +// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor +@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); +// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor +@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); -// Parsed from ATen/ops/repeat_interleave.h +// Parsed from ATen/ops/scatter_add.h // #pragma once @@ -59965,37 +45719,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); -@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor repeats); - -// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); -@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Const @ByRef Tensor repeats); - -// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? 
output_size=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Cast("int64_t") long repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); -@Namespace("at") public static native @ByVal Tensor repeat_interleave(@Const @ByRef Tensor self, @Cast("int64_t") long repeats); +// #include -// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, int? output_size=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor self, @ByVal SymInt repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); -@Namespace("at") public static native @ByVal Tensor repeat_interleave_symint(@Const @ByRef Tensor self, @ByVal SymInt repeats); +// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor +@Namespace("at") public static native @ByVal Tensor scatter_add(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); +// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); +// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_add_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByRef Tensor out); -// aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor repeat_interleave_out(@ByRef Tensor out, @Const @ByRef Tensor repeats, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional output_size); -@Namespace("at") public static native @ByRef Tensor repeat_interleave_out(@ByRef Tensor out, @Const @ByRef Tensor repeats); -// aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor repeat_interleave_outf(@Const @ByRef Tensor repeats, @ByVal LongOptional output_size, @ByRef Tensor out); +// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor +@Namespace("at") public static native @ByVal Tensor scatter_add(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); -// Parsed from ATen/ops/replication_pad1d.h +// Parsed from ATen/ops/scatter_reduce.h // #pragma once @@ -60016,40 +45757,70 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor +@Namespace("at") public static native @ByVal Tensor scatter_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByVal Tensor scatter_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); +// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); +@Namespace("at") public static native @ByRef Tensor scatter_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); +// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor scatter_reduce_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self, @ByRef Tensor out); -// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad1d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor replication_pad1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); -// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// Parsed from ATen/ops/searchsorted.h -// aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad1d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor out); +// #pragma once +// @generated by torchgen/gen.py from Function.h -// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad1d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef padding); +// #include + + +// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); +@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self); + +// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); +@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self); +// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor searchsorted_outf(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByVal @Cast("c10::optional*") Pointer side, @Const @ByRef TensorOptional sorter, @ByRef Tensor out); + +// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); +@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self); + +// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) 
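For searchsorted above, the sorted sequence and the query values should share a dtype; the sketch below keeps both float32 (values and class name illustrative, same global-class assumption as before):

import org.bytedeco.pytorch.Scalar;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class SearchsortedSketch {
    public static void main(String[] args) {
        Tensor boundaries = arange(new Scalar(0.0), new Scalar(10.0)); // sorted 1-D sequence
        Tensor values = rand(3).mul(new Scalar(9.0));
        Tensor idx = searchsorted(boundaries, values); // defaults: out_int32=false, right=false
    }
}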
+@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); +@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self); +// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor searchsorted_outf(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByVal @Cast("c10::optional*") Pointer side, @Const @ByRef TensorOptional sorter, @ByRef Tensor out); -// Parsed from ATen/ops/replication_pad1d_backward.h +// Parsed from ATen/ops/segment_reduce.h // #pragma once @@ -60070,40 +45841,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); - - -// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); - - -// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); - - -// aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor replication_pad1d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor grad_input); - - -// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad1d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// #include -// aten::replication_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad1d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor segment_reduce(@Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional indices, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Cast("bool") boolean unsafe/*=false*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); +@Namespace("at") public static native @ByVal Tensor segment_reduce(@Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce); +// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor segment_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional indices, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Cast("bool") boolean unsafe/*=false*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); +@Namespace("at") public static native @ByRef Tensor segment_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce); +// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor segment_reduce_outf(@Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef TensorOptional lengths, @Const @ByRef TensorOptional indices, @Const @ByRef TensorOptional offsets, @Cast("int64_t") long axis, @Cast("bool") boolean unsafe, @Const @ByRef ScalarOptional initial, @ByRef Tensor out); -// Parsed from ATen/ops/replication_pad2d.h +// Parsed from ATen/ops/select.h // #pragma once @@ -60124,40 +45878,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); - - -// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor replication_pad2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); - - -// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef padding); - +// #include -// aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor out); +// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor select(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("int64_t") long index); -// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor select(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long index); -// aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor select_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt index); -// Parsed from ATen/ops/replication_pad2d_backward.h +// Parsed from ATen/ops/select_backward.h // #pragma once @@ -60178,40 +45916,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor +@Namespace("at") public static native @ByVal Tensor select_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); +@Namespace("at") public static native @ByVal Tensor select_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); -// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); +// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor +@Namespace("at") public static native @ByVal Tensor select_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt index); -// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) 
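
// A quick sketch of the select overloads above: select(self, dim, index)
// returns a view along `dim`. The arange factory and Tensor.reshape are
// assumed from elsewhere in these presets.
import org.bytedeco.pytorch.Scalar;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class SelectSketch {
    public static void main(String[] args) {
        Tensor t = arange(new Scalar(0), new Scalar(12)).reshape(3, 4); // shape [3, 4]
        Tensor row = select(t, 0, 1); // view of row 1, shape [4]
        Tensor col = select(t, 1, 2); // view of column 2, shape [3]
        row.print();
        col.print();
    }
}
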
+@Namespace("at") public static native @ByRef Tensor select_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); +@Namespace("at") public static native @ByRef Tensor select_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); -// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor grad_input); +// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor select_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor select_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); -// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad2d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor select_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt index); -// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, SymInt[4] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad2d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor select_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt index, @ByRef Tensor out); -// Parsed from ATen/ops/replication_pad3d.h +// Parsed from ATen/ops/select_copy.h // #pragma once @@ -60232,40 +45970,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor replication_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor +@Namespace("at") public static native @ByVal Tensor select_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long index); -// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor replication_pad3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor +@Namespace("at") public static native @ByVal Tensor select_copy_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt index); -// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor select_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long index); -// aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor out); +// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor select_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); -// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor select_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt index); -// aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad3d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor select_copy_symint_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt index, @ByRef Tensor out); -// Parsed from ATen/ops/replication_pad3d_backward.h +// Parsed from ATen/ops/select_scatter.h // #pragma once @@ -60286,40 +46021,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor +@Namespace("at") public static native @ByVal Tensor select_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @Cast("int64_t") long index); -// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor grad_input); +// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor +@Namespace("at") public static native @ByVal Tensor select_scatter_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal SymInt index); -// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor select_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @Cast("int64_t") long index); -// aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor replication_pad3d_backward_symint_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding, @ByRef Tensor grad_input); +// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor select_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); -// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor replication_pad3d_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor select_scatter_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal SymInt index); -// aten::replication_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor replication_pad3d_backward_symint(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByVal SymIntRef padding); +// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor select_scatter_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal SymInt index, @ByRef Tensor out); -// Parsed from ATen/ops/requires_grad.h +// Parsed from ATen/ops/selu.h // #pragma once @@ -60340,14 +46072,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::selu(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor selu(@Const @ByRef Tensor self); + +// aten::selu_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor selu_(@ByRef Tensor self); -// Parsed from ATen/ops/reshape.h +// Parsed from ATen/ops/set.h // #pragma once @@ -60368,50 +46105,73 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor reshape(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shape); -@Namespace("at") public static native @ByVal Tensor reshape(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape); -// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor reshape_symint(@Const @ByRef Tensor self, @ByVal SymIntRef shape); +// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source); +// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByRef Tensor out); +// aten::set.source_Storage(Tensor self, Storage source) -> Tensor +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source); +// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// Parsed from ATen/ops/reshape_as.h +// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor set_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride); +@Namespace("at") public static native @ByRef Tensor set_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor set_symint_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal SymIntArrayRef stride, @ByRef Tensor out); -// #include +// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") LongArrayRef stride); +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor +@Namespace("at") public static native @ByVal Tensor set_symint(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntArrayRef stride); +@Namespace("at") public static native @ByVal Tensor set_symint(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntArrayRef size); +// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor source); +// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor source, @ByRef Tensor out); +// aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Const @ByRef Tensor source); + +// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @ByRef Tensor out); + +// aten::set(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self); -// Parsed from ATen/ops/resize.h + + + +// Parsed from ATen/ops/set_data.h // #pragma once @@ -60432,47 +46192,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - - -// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @Const @ByRef Tensor resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor resize_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor resize_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out); - - -// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor resize_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @Const @ByRef Tensor resize_symint_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size); - - -// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor resize_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out); - - -// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? 
memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - +// #include -// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor resize_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor resize_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size); -// Parsed from ATen/ops/resize_as.h +// Parsed from ATen/ops/sgn.h // #pragma once @@ -60493,27 +46220,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor resize_as_(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @Const @ByRef Tensor resize_as_(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template); -// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor resize_as_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor the_template, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @Const @ByRef Tensor resize_as_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor the_template); -// aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor resize_as_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template, @ByVal MemoryFormatOptional memory_format, @Const @ByRef Tensor out); +// aten::sgn(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor sgn(@Const @ByRef Tensor self); -// aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor resize_as(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor resize_as(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template); +// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sgn_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::sgn.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sgn_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/resize_as_sparse.h +// Parsed from ATen/ops/sigmoid.h // #pragma once @@ -60534,24 +46255,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor resize_as_sparse_(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template); +// aten::sigmoid(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor sigmoid(@Const @ByRef Tensor self); -// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor resize_as_sparse_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor the_template); -// aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor resize_as_sparse_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template, @Const @ByRef Tensor out); +// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sigmoid_(@ByRef Tensor self); -// aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor -@Namespace("at") public static native @ByVal Tensor resize_as_sparse(@Const @ByRef Tensor self, @Const @ByRef Tensor the_template); +// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sigmoid_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sigmoid_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/resolve_conj.h +// Parsed from ATen/ops/sigmoid_backward.h // #pragma once @@ -60572,16 +46293,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::resolve_conj(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor resolve_conj(@Const @ByRef Tensor self); +// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sigmoid_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output); +// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sigmoid_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @ByRef Tensor grad_input); +// aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor +@Namespace("at") public static native @ByVal Tensor sigmoid_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output); -// Parsed from ATen/ops/resolve_neg.h + +// Parsed from ATen/ops/sign.h // #pragma once @@ -60602,16 +46328,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::resolve_neg(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor resolve_neg(@Const @ByRef Tensor self); +// aten::sign(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor sign(@Const @ByRef Tensor self); + +// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
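
// The sigmoid group above shows the full torchgen quadruple that repeats for
// sign, sin, sinc and sinh below: op, in-place op_, op_out (out first) and
// op_outf (out last). Sketch (randn/empty/ones factories assumed):
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class SigmoidSketch {
    public static void main(String[] args) {
        Tensor x = randn(4);
        Tensor y = sigmoid(x); // functional
        sigmoid_(x);           // in-place; x now equals y
        Tensor out = empty(4);
        sigmoid_out(out, y);   // writes sigmoid(y) into the preallocated out
        Tensor gi = sigmoid_backward(ones(4), y); // grad wrt input, given output y
        gi.print();
    }
}
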
+@Namespace("at") public static native @ByRef Tensor sign_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sign_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/result_type.h +// Parsed from ATen/ops/signbit.h // #pragma once @@ -60632,25 +46363,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType -@Namespace("at") public static native ScalarType result_type(@Const @ByRef Tensor tensor, @Const @ByRef Tensor other); +// #include -// aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType -@Namespace("at") public static native ScalarType result_type(@Const @ByRef Tensor tensor, @Const @ByRef Scalar other); -// aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType -@Namespace("at") public static native ScalarType result_type(@Const @ByRef Scalar scalar, @Const @ByRef Tensor tensor); +// aten::signbit(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor signbit(@Const @ByRef Tensor self); -// aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType -@Namespace("at") public static native ScalarType result_type(@Const @ByRef Scalar scalar1, @Const @ByRef Scalar scalar2); +// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor signbit_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor signbit_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/retain_grad.h +// Parsed from ATen/ops/silu.h // #pragma once @@ -60671,14 +46398,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::silu(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor silu(@Const @ByRef Tensor self); +// aten::silu_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor silu_(@ByRef Tensor self); +// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor silu_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor silu_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/retains_grad.h +// Parsed from ATen/ops/silu_backward.h // #pragma once @@ -60699,14 +46436,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + +// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor silu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor silu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor grad_input); +// aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor silu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// Parsed from ATen/ops/rnn_relu.h +// Parsed from ATen/ops/sin.h // #pragma once @@ -60727,19 +46471,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple rnn_relu(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +// aten::sin(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor sin(@Const @ByRef Tensor self); -// aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple rnn_relu(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); +// aten::sin_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sin_(@ByRef Tensor self); +// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sin_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sin_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/rnn_relu_cell.h + +// Parsed from ATen/ops/sinc.h // #pragma once @@ -60760,17 +46509,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rnn_relu_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_ih, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_hh); -@Namespace("at") public static native @ByVal Tensor rnn_relu_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh); +// aten::sinc(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor sinc(@Const @ByRef Tensor self); + +// aten::sinc_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sinc_(@ByRef Tensor self); + +// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sinc_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor sinc_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/rnn_tanh.h +// Parsed from ATen/ops/sinh.h // #pragma once @@ -60791,19 +46547,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple rnn_tanh(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional, @Cast("bool") boolean batch_first); +// aten::sinh(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor sinh(@Const @ByRef Tensor self); -// aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple rnn_tanh(@Const @ByRef Tensor data, @Const @ByRef Tensor batch_sizes, @Const @ByRef Tensor hx, @ByVal TensorArrayRef params, @Cast("bool") boolean has_biases, @Cast("int64_t") long num_layers, double dropout, @Cast("bool") boolean train, @Cast("bool") boolean bidirectional); +// aten::sinh_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sinh_(@ByRef Tensor self); +// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sinh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sinh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/rnn_tanh_cell.h + +// Parsed from ATen/ops/size.h // #pragma once @@ -60824,17 +46585,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rnn_tanh_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_ih, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional b_hh); -@Namespace("at") public static native @ByVal Tensor rnn_tanh_cell(@Const @ByRef Tensor input, @Const @ByRef Tensor hx, @Const @ByRef Tensor w_ih, @Const @ByRef Tensor w_hh); +// aten::size.int(Tensor self, int dim) -> int +@Namespace("at") public static native @Cast("int64_t") long __dispatch_size(@Const @ByRef Tensor self, @Cast("int64_t") long dim); + +// aten::size.Dimname(Tensor self, Dimname dim) -> int +@Namespace("at") public static native @Cast("int64_t") long size(@Const @ByRef Tensor self, @ByVal Dimname dim); -// Parsed from ATen/ops/roll.h +// Parsed from ATen/ops/slice.h // #pragma once @@ -60855,28 +46618,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor -@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shifts); -@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -@Namespace("at") public static native @ByVal Tensor roll(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shifts); +// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor slice(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional end, @Cast("int64_t") long step/*=1*/); +@Namespace("at") public static native @ByVal Tensor slice(@Const @ByRef Tensor self); -// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shifts); -@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -@Namespace("at") public static native @ByRef Tensor roll_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shifts); -// aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) 
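
// slice above maps t[start:end:step] along `dim`; absent bounds are passed as
// empty optionals. Sketch; the LongOptional value constructor is an assumption
// based on how these presets wrap c10::optional:
import org.bytedeco.pytorch.LongOptional;
import org.bytedeco.pytorch.Scalar;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class SliceSketch {
    public static void main(String[] args) {
        Tensor t = arange(new Scalar(0), new Scalar(10)); // [0..9]
        Tensor s = slice(t, 0, new LongOptional(2), new LongOptional(8), 2); // t[2:8:2]
        s.print(); // [2, 4, 6]
    }
}
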
-@Namespace("at") public static native @ByRef Tensor roll_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef shifts, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor roll_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shifts, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); +// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor slice_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional end, @ByVal(nullValue = "c10::SymInt(1)") SymInt step); +@Namespace("at") public static native @ByVal Tensor slice_symint(@Const @ByRef Tensor self); -// Parsed from ATen/ops/rot90.h + + +// Parsed from ATen/ops/slice_backward.h // #pragma once @@ -60897,26 +46655,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor -@Namespace("at") public static native @ByVal Tensor rot90(@Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("at") public static native @ByVal Tensor rot90(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor rot90(@Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); +// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor +@Namespace("at") public static native @ByVal Tensor slice_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); +@Namespace("at") public static native @ByVal Tensor slice_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); -// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rot90_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast("c10::ArrayRef*") LongArrayRef dims); -@Namespace("at") public static native @ByRef Tensor rot90_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor rot90_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long k/*=1*/, @ByVal(nullValue = "at::IntArrayRef({0,1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor rot90_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor rot90_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims, @ByRef Tensor out); + +// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor +@Namespace("at") public static native @ByVal Tensor slice_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt end, @ByVal SymInt step); +// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slice_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); +@Namespace("at") public static native @ByRef Tensor slice_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); -// Parsed from ATen/ops/round.h +// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slice_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slice_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step, @ByRef Tensor out); + + +// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slice_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt end, @ByVal SymInt step); + + +// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slice_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt end, @ByVal SymInt step, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/slice_copy.h // #pragma once @@ -60937,35 +46709,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::round(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor round(@Const @ByRef Tensor self); +// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slice_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional end, @Cast("int64_t") long step/*=1*/); +@Namespace("at") public static native @ByVal Tensor slice_copy(@Const @ByRef Tensor self); -// aten::round_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor round_(@ByRef Tensor self); -// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor round_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor round_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slice_copy_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional end, @ByVal(nullValue = "c10::SymInt(1)") SymInt step); +@Namespace("at") public static native @ByVal Tensor slice_copy_symint(@Const @ByRef Tensor self); -// aten::round.decimals(Tensor self, *, int decimals) -> Tensor -@Namespace("at") public static native @ByVal Tensor round(@Const @ByRef Tensor self, @Cast("int64_t") long decimals); -// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor round_(@ByRef Tensor self, @Cast("int64_t") long decimals); +// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slice_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional end, @Cast("int64_t") long step/*=1*/); +@Namespace("at") public static native @ByRef Tensor slice_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor round_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long decimals); -// aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor round_outf(@Const @ByRef Tensor self, @Cast("int64_t") long decimals, @ByRef Tensor out); + +// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slice_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal LongOptional start, @ByVal LongOptional end, @Cast("int64_t") long step, @ByRef Tensor out); +// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) 
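Every `.out` schema above is bound twice: `slice_copy_out` puts the destination first and keeps the schema defaults, while `slice_copy_outf` is the fully explicit spelling with `out` last. A sketch under the same imports as the first example; the LongOptional value constructor is assumed:

Tensor x = ones(6);
Tensor whole = empty(6);
slice_copy_out(whole, x);                 // convenience overload: all schema defaults
Tensor firstThree = empty(3);
slice_copy_outf(x, 0, new LongOptional(0), new LongOptional(3), 1, firstThree);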
+@Namespace("at") public static native @ByRef Tensor slice_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional end, @ByVal(nullValue = "c10::SymInt(1)") SymInt step); +@Namespace("at") public static native @ByRef Tensor slice_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// Parsed from ATen/ops/row_indices.h +// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slice_copy_symint_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymIntOptional start, @ByVal SymIntOptional end, @ByVal SymInt step, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/slice_scatter.h // #pragma once @@ -60986,14 +46764,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slice_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional end, @Cast("int64_t") long step/*=1*/); +@Namespace("at") public static native @ByVal Tensor slice_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src); + + +// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slice_scatter_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional end, @ByVal(nullValue = "c10::SymInt(1)") SymInt step); +@Namespace("at") public static native @ByVal Tensor slice_scatter_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor src); + + +// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slice_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional end, @Cast("int64_t") long step/*=1*/); +@Namespace("at") public static native @ByRef Tensor slice_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src); + + +// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slice_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal LongOptional start, @ByVal LongOptional end, @Cast("int64_t") long step, @ByRef Tensor out); + + +// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor slice_scatter_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional end, @ByVal(nullValue = "c10::SymInt(1)") SymInt step); +@Namespace("at") public static native @ByRef Tensor slice_scatter_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src); + +// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slice_scatter_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal SymIntOptional start, @ByVal SymIntOptional end, @ByVal SymInt step, @ByRef Tensor out); -// Parsed from ATen/ops/row_indices_copy.h +// Parsed from ATen/ops/slogdet.h // #pragma once @@ -61014,21 +46819,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::row_indices_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor row_indices_copy(@Const @ByRef Tensor self); +// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) +@Namespace("at") public static native @ByVal T_TensorTensor_T slogdet(@Const @ByRef Tensor self); -// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor row_indices_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor row_indices_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) +@Namespace("at") public static native @ByVal T_TensorTensor_T slogdet_out(@ByRef Tensor sign, @ByRef Tensor logabsdet, @Const @ByRef Tensor self); +// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) +@Namespace("at") public static native @ByVal T_TensorTensor_T slogdet_outf(@Const @ByRef Tensor self, @ByRef Tensor sign, @ByRef Tensor logabsdet); -// Parsed from ATen/ops/row_stack.h +// Parsed from ATen/ops/slow_conv3d.h // #pragma once @@ -61049,56 +46854,51 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::row_stack(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor row_stack(@ByVal TensorArrayRef tensors); - -// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor row_stack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor row_stack_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); +// #include +// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// Parsed from ATen/ops/rrelu.h +// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByRef Tensor out); -// #include +// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rrelu(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.125)") Scalar lower, @Const @ByRef(nullValue = "at::Scalar(0.3333333333333333)") Scalar upper, @Cast("bool") boolean training/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor rrelu(@Const @ByRef Tensor self); +// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rrelu_(@ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.125)") Scalar lower, @Const @ByRef(nullValue = "at::Scalar(0.3333333333333333)") Scalar upper, @Cast("bool") boolean training/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor rrelu_(@ByRef Tensor self); -// Parsed from ATen/ops/rrelu_with_noise.h +// Parsed from ATen/ops/slow_conv3d_forward.h // #pragma once @@ -61119,27 +46919,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) 
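The functional form mirrors at::slow_conv3d; the `_symint` twin differs only in taking SymInt-typed padding, which from Java matters mainly under symbolic shape tracing. With concrete shapes the plain vararg overload declared above suffices:

Tensor x = ones(1, 1, 4, 4, 4);
Tensor w = ones(1, 1, 3, 3, 3);
Tensor y = slow_conv3d(x, w, 3, 3, 3);    // bias=None, stride=1, padding=0
System.out.println(y.size(2));            // 2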
-@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef(nullValue = "at::Scalar(0.125)") Scalar lower, @Const @ByRef(nullValue = "at::Scalar(0.3333333333333333)") Scalar upper, @Cast("bool") boolean training/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor noise); -// aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef Scalar lower, @Const @ByRef Scalar upper, @Cast("bool") boolean training, @ByVal GeneratorOptional generator, @ByRef Tensor out); +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -// aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor rrelu_with_noise(@Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef(nullValue = "at::Scalar(0.125)") Scalar lower, @Const @ByRef(nullValue = "at::Scalar(0.3333333333333333)") Scalar upper, @Cast("bool") boolean training/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor rrelu_with_noise(@Const @ByRef Tensor self, @Const @ByRef Tensor noise); -// aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_(@ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef(nullValue = "at::Scalar(0.125)") Scalar lower, @Const @ByRef(nullValue = "at::Scalar(0.3333333333333333)") Scalar upper, @Cast("bool") boolean training/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_(@ByRef Tensor self, @Const @ByRef Tensor noise); +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor output); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor output); + + +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding); + + +// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByRef Tensor output); +@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByRef Tensor output); + + +// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); + + +// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias, int[3] stride, SymInt[3] padding) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding); +@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding); -// Parsed from ATen/ops/rrelu_with_noise_backward.h + +// Parsed from ATen/ops/slow_conv_dilated2d.h // #pragma once @@ -61160,21 +46976,51 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor -@Namespace("at") public static native @ByVal Tensor rrelu_with_noise_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef Scalar lower, @Const @ByRef Scalar upper, @Cast("bool") boolean training, @Cast("bool") boolean self_is_result); +// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!) 
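slow_conv_dilated2d keeps the same overload scheme; note the long[] parameters with a final long... vararg, which lets the last array be written inline, and the nullValue annotation on bias, which lets Java null stand in for the declared c10::optional default. A sketch with an explicit dilation (shapes are illustrative):

Tensor x = ones(1, 1, 7, 7);
Tensor w = ones(1, 1, 3, 3);
// output spatial size: (7 + 2*0 - 2*(3-1) - 1)/1 + 1 = 3
Tensor y = slow_conv_dilated2d(x, w, new long[]{3, 3}, null,   // null bias -> declared default
        new long[]{1, 1}, new long[]{0, 0}, 2, 2);             // trailing long... dilation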
-@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef Scalar lower, @Const @ByRef Scalar upper, @Cast("bool") boolean training, @Cast("bool") boolean self_is_result); -// aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rrelu_with_noise_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor noise, @Const @ByRef Scalar lower, @Const @ByRef Scalar upper, @Cast("bool") boolean training, @Cast("bool") boolean self_is_result, @ByRef Tensor out); + +// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); + + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); + + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); + + +// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); + -// Parsed from ATen/ops/rshift.h + + +// Parsed from ATen/ops/slow_conv_dilated3d.h // #pragma once @@ -61195,29 +47041,51 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor __rshift__(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor __rshift__(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor __rshift___out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor __rshift___outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor __rshift___out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor __rshift___outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); + + +// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// Parsed from ATen/ops/rsqrt.h + +// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
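The 3-D dilated variant is bound identically; with concrete shapes the vararg convenience overload covers the common case:

Tensor x = ones(1, 1, 5, 5, 5);
Tensor w = ones(1, 1, 3, 3, 3);
Tensor y = slow_conv_dilated3d(x, w, 3, 3, 3);  // bias=None, stride=1, padding=0, dilation=1
System.out.println(y.size(2));                  // 5 - 3 + 1 = 3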
+@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); + + + + + +// Parsed from ATen/ops/slow_conv_transpose2d.h // #pragma once @@ -61238,24 +47106,51 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rsqrt(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor rsqrt(@Const @ByRef Tensor self); +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rsqrt_(@ByRef Tensor self); -// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rsqrt_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor rsqrt_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); + +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); -// Parsed from ATen/ops/rsub.h + +// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); + + +// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); + + + + + +// Parsed from ATen/ops/slow_conv_transpose3d.h // #pragma once @@ -61276,33 +47171,51 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor rsub(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor rsub(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor rsub(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor rsub(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rsub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor rsub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rsub_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByVal LongArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); -// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rsub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor rsub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor rsub_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef Scalar alpha, @ByRef Tensor out); + +// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dilation); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal LongArrayRef dilation, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntArrayRef padding, @ByVal SymIntArrayRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); + + +// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); + + +// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); +@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); + -// Parsed from ATen/ops/scalar_tensor.h + + +// Parsed from ATen/ops/smm.h // #pragma once @@ -61323,24 +47236,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor scalar_tensor(@Const @ByRef Scalar s, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor scalar_tensor(@Const @ByRef Scalar s); -// aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor scalar_tensor(@Const @ByRef Scalar s, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scalar_tensor_out(@ByRef Tensor out, @Const @ByRef Scalar s); -// aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scalar_tensor_outf(@Const @ByRef Scalar s, @ByRef Tensor out); +// aten::smm(Tensor self, Tensor mat2) -> Tensor +@Namespace("at") public static native @ByVal Tensor smm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat2); -// Parsed from ATen/ops/scaled_dot_product_attention.h +// Parsed from ATen/ops/smooth_l1_loss.h // #pragma once @@ -61361,17 +47266,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? 
attn_mask=None, float dropout_p=0.0, bool is_causal=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor scaled_dot_product_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional attn_mask, double dropout_p/*=0.0*/, @Cast("bool") boolean is_causal/*=false*/); -@Namespace("at") public static native @ByVal Tensor scaled_dot_product_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value); +// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor smooth_l1_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, double beta/*=1.0*/); +@Namespace("at") public static native @ByRef Tensor smooth_l1_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor smooth_l1_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double beta, @ByRef Tensor out); + +// aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor +@Namespace("at") public static native @ByVal Tensor smooth_l1_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, double beta/*=1.0*/); +@Namespace("at") public static native @ByVal Tensor smooth_l1_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/scatter.h +// Parsed from ATen/ops/smooth_l1_loss_backward.h // #pragma once @@ -61392,51 +47303,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); - -// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); -// aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByRef Tensor out); - -// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); - -// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); -// aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) 
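The slow_conv_transpose2d/3d bindings above show the overload families this refactoring generates for each ATen function: a LongArrayRef form, a long[]/long... form (via @Cast/@StdVector), and shortened forms that drop trailing parameters carrying nullValue defaults, with _symint twins taking SymIntArrayRef for symbolic shapes. A minimal sketch of calling the varargs form, assuming the analogous generated randn(long...) factory overload from ATen/ops/randn.h:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ConvTransposeSketch {
    public static void main(String[] args) {
        Tensor input  = randn(1, 4, 8, 8);  // NCHW input
        Tensor weight = randn(4, 2, 3, 3);  // (in, out, kH, kW) for a transposed conv
        // Varargs kernel_size overload; bias, stride, padding, output_padding
        // and dilation all fall back to their nullValue defaults.
        Tensor out = slow_conv_transpose2d(input, weight, 3, 3);
        // (8 - 1) * 1 - 2 * 0 + 3 = 10, so the spatial dims grow to 10x10.
        System.out.println(out.size(2) + "x" + out.size(3));
    }
}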
-@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByRef Tensor out); - -// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); - -// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); -// aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @ByRef Tensor out); - -// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce); +// #include -// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce); -// aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value, @ByVal @Cast("c10::string_view*") Pointer reduce, @ByRef Tensor out); -// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); +// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor smooth_l1_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double beta); +// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor smooth_l1_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double beta, @ByRef Tensor grad_input); -// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Scalar value); +// aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor +@Namespace("at") public static native @ByVal Tensor smooth_l1_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double beta); -// Parsed from ATen/ops/scatter_add.h +// Parsed from ATen/ops/soft_margin_loss.h // #pragma once @@ -61457,24 +47338,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter_add(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); -// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_add_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); -// aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_add_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByRef Tensor out); +// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor soft_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByRef Tensor soft_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor soft_margin_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor out); -// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter_add(@Const @ByRef Tensor self, @ByVal Dimname dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src); +// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor soft_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor soft_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); -// Parsed from ATen/ops/scatter_reduce.h +// Parsed from ATen/ops/soft_margin_loss_backward.h // #pragma once @@ -61495,23 +47375,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor -@Namespace("at") public static native @ByVal Tensor scatter_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); -@Namespace("at") public static native @ByVal Tensor scatter_reduce(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); +// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor soft_margin_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); +// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor soft_margin_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor grad_input); -// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor scatter_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self/*=true*/); -@Namespace("at") public static native @ByRef Tensor scatter_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce); -// aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor scatter_reduce_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Const @ByRef Tensor index, @Const @ByRef Tensor src, @ByVal @Cast("c10::string_view*") Pointer reduce, @Cast("bool") boolean include_self, @ByRef Tensor out); +// aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor +@Namespace("at") public static native @ByVal Tensor soft_margin_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); -// Parsed from ATen/ops/searchsorted.h +// Parsed from ATen/ops/softmax.h // #pragma once @@ -61532,33 +47410,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); -@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self); -// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); -@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self); -// aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor searchsorted_outf(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Tensor self, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByVal @Cast("c10::optional*") Pointer side, @Const @ByRef TensorOptional sorter, @ByRef Tensor out); +// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? 
sorter=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); -@Namespace("at") public static native @ByVal Tensor searchsorted(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self); +// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor softmax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32/*=false*/, @Cast("bool") boolean right/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer side, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional sorter); -@Namespace("at") public static native @ByRef Tensor searchsorted_out(@ByRef Tensor out, @Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self); -// aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor searchsorted_outf(@Const @ByRef Tensor sorted_sequence, @Const @ByRef Scalar self, @Cast("bool") boolean out_int32, @Cast("bool") boolean right, @ByVal @Cast("c10::optional*") Pointer side, @Const @ByRef TensorOptional sorter, @ByRef Tensor out); +// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor softmax(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor softmax(@Const @ByRef Tensor self, @ByVal Dimname dim); -// Parsed from ATen/ops/segment_reduce.h +// Parsed from ATen/ops/softplus.h // #pragma once @@ -61579,23 +47451,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? 
initial=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor segment_reduce(@Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional indices, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Cast("bool") boolean unsafe/*=false*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); -@Namespace("at") public static native @ByVal Tensor segment_reduce(@Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce); +// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor softplus_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(20)") Scalar threshold); +@Namespace("at") public static native @ByRef Tensor softplus_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor softplus_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar beta, @Const @ByRef Scalar threshold, @ByRef Tensor out); -// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor segment_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional lengths, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional indices, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional offsets, @Cast("int64_t") long axis/*=0*/, @Cast("bool") boolean unsafe/*=false*/, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") ScalarOptional initial); -@Namespace("at") public static native @ByRef Tensor segment_reduce_out(@ByRef Tensor out, @Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce); -// aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!) 
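Scalar-valued defaults (here beta=1, threshold=20) surface as @ByRef(nullValue = "at::Scalar(...)") parameters, so the shortened overload omits them and an explicit call passes org.bytedeco.pytorch.Scalar instances. A sketch against the softplus_out pair above (the functional softplus follows just below), assuming Scalar's generated double constructor:

import org.bytedeco.pytorch.Scalar;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ScalarDefaultSketch {
    public static void main(String[] args) {
        Tensor x   = randn(4);
        Tensor buf = zeros(4);
        softplus_out(buf, x);                                     // beta=1, threshold=20
        softplus_out(buf, x, new Scalar(2.0), new Scalar(20.0));  // explicit beta
    }
}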
-@Namespace("at") public static native @ByRef Tensor segment_reduce_outf(@Const @ByRef Tensor data, @ByVal @Cast("c10::string_view*") Pointer reduce, @Const @ByRef TensorOptional lengths, @Const @ByRef TensorOptional indices, @Const @ByRef TensorOptional offsets, @Cast("int64_t") long axis, @Cast("bool") boolean unsafe, @Const @ByRef ScalarOptional initial, @ByRef Tensor out); +// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor +@Namespace("at") public static native @ByVal Tensor softplus(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(20)") Scalar threshold); +@Namespace("at") public static native @ByVal Tensor softplus(@Const @ByRef Tensor self); -// Parsed from ATen/ops/select.h +// Parsed from ATen/ops/softplus_backward.h // #pragma once @@ -61616,24 +47488,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor select(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("int64_t") long index); - -// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor select(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long index); +// #include -// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor select_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt index); +// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor softplus_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar beta, @Const @ByRef Scalar threshold); +// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor softplus_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar beta, @Const @ByRef Scalar threshold, @ByRef Tensor grad_input); +// aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor +@Namespace("at") public static native @ByVal Tensor softplus_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar beta, @Const @ByRef Scalar threshold); -// Parsed from ATen/ops/select_backward.h +// Parsed from ATen/ops/softshrink.h // #pragma once @@ -61654,40 +47523,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor -@Namespace("at") public static native @ByVal Tensor select_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); -@Namespace("at") public static native @ByVal Tensor select_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); - - -// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor -@Namespace("at") public static native @ByVal Tensor select_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt index); - - -// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor select_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); -@Namespace("at") public static native @ByRef Tensor select_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index); - - -// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor select_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor select_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); - - -// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor select_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal SymIntRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt index); +// #include -// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor select_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt index, @ByRef Tensor out); +// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor softshrink_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.5)") Scalar lambd); +@Namespace("at") public static native @ByRef Tensor softshrink_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor softshrink_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar lambd, @ByRef Tensor out); +// aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor +@Namespace("at") public static native @ByVal Tensor softshrink(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.5)") Scalar lambd); +@Namespace("at") public static native @ByVal Tensor softshrink(@Const @ByRef Tensor self); -// Parsed from ATen/ops/select_copy.h +// Parsed from ATen/ops/softshrink_backward.h // #pragma once @@ -61708,37 +47560,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor -@Namespace("at") public static native @ByVal Tensor select_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long index); - - -// aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor -@Namespace("at") public static native @ByVal Tensor select_copy_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt index); - - -// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor select_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long index); - - -// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor select_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); - - -// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor select_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt index); +// #include -// aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor select_copy_symint_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymInt index, @ByRef Tensor out); +// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor softshrink_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd); +// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor softshrink_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd, @ByRef Tensor grad_input); +// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor +@Namespace("at") public static native @ByVal Tensor softshrink_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd); -// Parsed from ATen/ops/select_scatter.h +// Parsed from ATen/ops/sort.h // #pragma once @@ -61759,37 +47595,53 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor -@Namespace("at") public static native @ByVal Tensor select_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @Cast("int64_t") long index); - - -// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor -@Namespace("at") public static native @ByVal Tensor select_scatter_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal SymInt index); +// #include -// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor select_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @Cast("int64_t") long index); +// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self); +// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean descending, @ByRef Tensor values, @ByRef Tensor indices); +// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal BoolOptional stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal BoolOptional stable); +// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_outf(@Const @ByRef Tensor self, @ByVal BoolOptional stable, @Cast("int64_t") long dim, @Cast("bool") boolean descending, @ByRef Tensor values, @ByRef Tensor indices); -// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor select_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @Cast("int64_t") long index, @ByRef Tensor out); +// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T sort(@Const @ByRef Tensor self); +// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort(@Const @ByRef Tensor self, @ByVal BoolOptional stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T sort(@Const @ByRef Tensor self, @ByVal BoolOptional stable); -// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor select_scatter_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal SymInt index); +// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean descending, @ByRef Tensor values, @ByRef Tensor indices); +// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal BoolOptional stable, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal BoolOptional stable, @ByVal Dimname dim); +// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort_outf(@Const @ByRef Tensor self, @ByVal BoolOptional stable, @ByVal Dimname dim, @Cast("bool") boolean descending, @ByRef Tensor values, @ByRef Tensor indices); -// aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor select_scatter_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal SymInt index, @ByRef Tensor out); +// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T sort(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T sort(@Const @ByRef Tensor self, @ByVal BoolOptional stable, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T sort(@Const @ByRef Tensor self, @ByVal BoolOptional stable, @ByVal Dimname dim); -// Parsed from ATen/ops/selu.h +// Parsed from ATen/ops/sparse_bsc_tensor.h // #pragma once @@ -61810,19 +47662,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::selu(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor selu(@Const @ByRef Tensor self); +// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::selu_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor selu_(@ByRef Tensor self); +// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// Parsed from ATen/ops/set.h +// Parsed from ATen/ops/sparse_bsr_tensor.h // #pragma once @@ -61843,73 +47701,64 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - - - - -// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source); -// aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByRef Tensor out); - -// aten::set.source_Storage(Tensor self, Storage source) -> Tensor -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source); +// #include -// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByRef Tensor out); +// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) 
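The sparse factory functions pair each packed TensorOptions overload with an unpacked dtype/layout/device/pin_memory one, and size arrives either as LongArrayRef or as a plain long[]. A hypothetical helper, assuming TensorOptions' ScalarType constructor and caller-supplied index/value tensors that already form a valid BSR layout:

import org.bytedeco.pytorch.Tensor;
import org.bytedeco.pytorch.TensorOptions;
import org.bytedeco.pytorch.global.torch;
import static org.bytedeco.pytorch.global.torch.*;

public class SparseBsrSketch {
    // crow/col/values must describe a valid BSR matrix of shape rows x cols.
    static Tensor bsr(Tensor crow, Tensor col, Tensor values, long rows, long cols) {
        // new long[]{...} selects the @StdVector long[] size overload;
        // TensorOptions(ScalarType) is an assumed generated constructor.
        return sparse_bsr_tensor(crow, col, values, new long[]{rows, cols},
                new TensorOptions(torch.ScalarType.Float));
    }
}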
-@Namespace("at") public static native @ByRef Tensor set_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntRef stride); -@Namespace("at") public static native @ByRef Tensor set_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntRef size); -// aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_symint_outf(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntRef size, @ByVal SymIntRef stride, @ByRef Tensor out); +// Parsed from ATen/ops/sparse_compressed_tensor.h +// #pragma once -// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::IntArrayRef{}") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @Cast("int64_t") long storage_offset, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// @generated by torchgen/gen.py from Function.h +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor -@Namespace("at") public static native @ByVal Tensor set_symint(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntRef size, @ByVal(nullValue = "c10::SymIntArrayRef{}") SymIntRef stride); -@Namespace("at") public static native @ByVal Tensor set_symint(@Const @ByRef Tensor self, @Cast({"", "c10::Storage&&"}) @StdMove Storage source, @ByVal SymInt storage_offset, @ByVal SymIntRef size); -// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor source); -// aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor source, @ByRef Tensor out); +// #include -// aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self, @Const @ByRef Tensor source); -// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor set_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::set(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor set(@Const @ByRef Tensor self); +// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// Parsed from ATen/ops/set_data.h +// Parsed from ATen/ops/sparse_coo_tensor.h // #pragma once @@ -61930,14 +47779,42 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values); +// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_out(@ByRef Tensor out, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_outf(@ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// Parsed from ATen/ops/sgn.h +// Parsed from ATen/ops/sparse_csc_tensor.h // #pragma once @@ -61958,21 +47835,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::sgn(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor sgn(@Const @ByRef Tensor self); +// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sgn_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sgn_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// Parsed from ATen/ops/sigmoid.h +// Parsed from ATen/ops/sparse_csr_tensor.h // #pragma once @@ -61993,24 +47874,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::sigmoid(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor sigmoid(@Const @ByRef Tensor self); -// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sigmoid_(@ByRef Tensor self); +// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sigmoid_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sigmoid_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// Parsed from ATen/ops/sigmoid_backward.h +// Parsed from ATen/ops/sparse_dim.h // #pragma once @@ -62031,21 +47913,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sigmoid_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output); -// aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) 
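As a concrete end-to-end sketch of the CSR factory just above: build the compressed row pointers, the column indices and the values, then call sparse_csr_tensor. AbstractTensor.create is assumed here as the presets' array-to-Tensor helper; any int64 index tensors and a matching value tensor would do:

    // 2x2 CSR matrix [[0, 7], [0, 0]]
    Tensor crow = AbstractTensor.create(new long[]{0, 1, 1}); // one row pointer per row, plus one
    Tensor col  = AbstractTensor.create(new long[]{1});       // column of each stored value
    Tensor vals = AbstractTensor.create(new float[]{7f});     // the stored values
    Tensor csr  = sparse_csr_tensor(crow, col, vals, new long[]{2, 2}, new TensorOptions());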
-@Namespace("at") public static native @ByRef Tensor sigmoid_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @ByRef Tensor grad_input); -// aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor -@Namespace("at") public static native @ByVal Tensor sigmoid_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output); -// Parsed from ATen/ops/sign.h +// Parsed from ATen/ops/sparse_mask.h // #pragma once @@ -62066,21 +47941,18 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::sign(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor sign(@Const @ByRef Tensor self); -// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sign_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sign_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sparse_mask_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask); +// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sparse_mask_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @ByRef Tensor out); -// Parsed from ATen/ops/signbit.h +// Parsed from ATen/ops/sparse_resize.h // #pragma once @@ -62101,21 +47973,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::signbit(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor signbit(@Const @ByRef Tensor self); +// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); -// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor signbit_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor signbit_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_resize(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +@Namespace("at") public static native @ByVal Tensor sparse_resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -// Parsed from ATen/ops/silu.h +// Parsed from ATen/ops/sparse_resize_and_clear.h // #pragma once @@ -62136,24 +48011,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::silu(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor silu(@Const @ByRef Tensor self); -// aten::silu_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor silu_(@ByRef Tensor self); +// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); +@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); -// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor silu_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor silu_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_resize_and_clear(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +@Namespace("at") public static native @ByVal Tensor sparse_resize_and_clear(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -// Parsed from ATen/ops/silu_backward.h +// Parsed from ATen/ops/sparse_sampled_addmm.h // #pragma once @@ -62174,21 +48049,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor silu_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); -// aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor silu_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @ByRef Tensor grad_input); +// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sparse_sampled_addmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor sparse_sampled_addmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); +// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sparse_sampled_addmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor silu_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self); +// aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor sparse_sampled_addmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor sparse_sampled_addmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// Parsed from ATen/ops/sin.h +// Parsed from ATen/ops/special_airy_ai.h // #pragma once @@ -62209,24 +48086,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::sin(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor sin(@Const @ByRef Tensor self); -// aten::sin_(Tensor(a!) self) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor sin_(@ByRef Tensor self); +// aten::special_airy_ai(Tensor x) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_airy_ai(@Const @ByRef Tensor x); -// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sin_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sin_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_airy_ai_out(@ByRef Tensor out, @Const @ByRef Tensor x); +// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_airy_ai_outf(@Const @ByRef Tensor x, @ByRef Tensor out); -// Parsed from ATen/ops/sinc.h +// Parsed from ATen/ops/special_bessel_j0.h // #pragma once @@ -62247,24 +48121,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::sinc(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor sinc(@Const @ByRef Tensor self); -// aten::sinc_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sinc_(@ByRef Tensor self); +// aten::special_bessel_j0(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_bessel_j0(@Const @ByRef Tensor self); -// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sinc_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sinc_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_bessel_j0_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_bessel_j0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/sinh.h +// Parsed from ATen/ops/special_bessel_j1.h // #pragma once @@ -62285,24 +48156,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::sinh(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor sinh(@Const @ByRef Tensor self); -// aten::sinh_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sinh_(@ByRef Tensor self); +// aten::special_bessel_j1(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_bessel_j1(@Const @ByRef Tensor self); -// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sinh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sinh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_bessel_j1_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_bessel_j1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/size.h +// Parsed from ATen/ops/special_bessel_y0.h // #pragma once @@ -62323,19 +48191,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::size.int(Tensor self, int dim) -> int -@Namespace("at") public static native @Cast("int64_t") long __dispatch_size(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::special_bessel_y0(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_bessel_y0(@Const @ByRef Tensor self); -// aten::size.Dimname(Tensor self, Dimname dim) -> int -@Namespace("at") public static native @Cast("int64_t") long size(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_bessel_y0_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_bessel_y0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/slice.h +// Parsed from ATen/ops/special_bessel_y1.h // #pragma once @@ -62356,23 +48226,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor slice(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional end, @Cast("int64_t") long step/*=1*/); -@Namespace("at") public static native @ByVal Tensor slice(@Const @ByRef Tensor self); +// #include -// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor slice_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional end, @ByVal(nullValue = "c10::SymInt(1)") SymInt step); -@Namespace("at") public static native @ByVal Tensor slice_symint(@Const @ByRef Tensor self); +// aten::special_bessel_y1(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_bessel_y1(@Const @ByRef Tensor self); +// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_bessel_y1_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_bessel_y1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/slice_backward.h +// Parsed from ATen/ops/special_chebyshev_polynomial_t.h // #pragma once @@ -62393,40 +48261,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor -@Namespace("at") public static native @ByVal Tensor slice_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); -@Namespace("at") public static native @ByVal Tensor slice_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); - - -// aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor -@Namespace("at") public static native @ByVal Tensor slice_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt end, @ByVal SymInt step); - - -// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slice_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); -@Namespace("at") public static native @ByRef Tensor slice_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step); +// #include -// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slice_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slice_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long start, @Cast("int64_t") long end, @Cast("int64_t") long step, @ByRef Tensor out); +// aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_t(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor slice_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad_output, @ByVal SymIntRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt end, @ByVal SymInt step); +// aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slice_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef input_sizes, @Cast("int64_t") long dim, @ByVal SymInt start, @ByVal SymInt end, @ByVal SymInt step, @ByRef Tensor out); +// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/slice_copy.h +// Parsed from ATen/ops/special_chebyshev_polynomial_u.h // #pragma once @@ -62447,41 +48312,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slice_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional end, @Cast("int64_t") long step/*=1*/); -@Namespace("at") public static native @ByVal Tensor slice_copy(@Const @ByRef Tensor self); - - -// aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? 
end=None, SymInt step=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slice_copy_symint(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional end, @ByVal(nullValue = "c10::SymInt(1)") SymInt step); -@Namespace("at") public static native @ByVal Tensor slice_copy_symint(@Const @ByRef Tensor self); - - -// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slice_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional end, @Cast("int64_t") long step/*=1*/); -@Namespace("at") public static native @ByRef Tensor slice_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// #include -// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slice_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal LongOptional start, @ByVal LongOptional end, @Cast("int64_t") long step, @ByRef Tensor out); +// aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_u(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slice_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional end, @ByVal(nullValue = "c10::SymInt(1)") SymInt step); -@Namespace("at") public static native @ByRef Tensor slice_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) 
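The Chebyshev bindings expose three overloads per polynomial family: both arguments as Tensors, or either the evaluation point x or the order n as a broadcast Scalar. For the special_chebyshev_polynomial_u declarations above, with x and n assumed to be existing Tensors:

    Tensor tt = special_chebyshev_polynomial_u(x, n);                // element-wise pairs
    Tensor ts = special_chebyshev_polynomial_u(new Scalar(0.5), n);  // fixed x, tensor orders
    Tensor st = special_chebyshev_polynomial_u(x, new Scalar(3));    // tensor x, fixed order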
-@Namespace("at") public static native @ByRef Tensor slice_copy_symint_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal SymIntOptional start, @ByVal SymIntOptional end, @ByVal SymInt step, @ByRef Tensor out); +// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/slice_scatter.h +// Parsed from ATen/ops/special_chebyshev_polynomial_v.h // #pragma once @@ -62502,41 +48363,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slice_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional end, @Cast("int64_t") long step/*=1*/); -@Namespace("at") public static native @ByVal Tensor slice_scatter(@Const @ByRef Tensor self, @Const @ByRef Tensor src); - - -// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slice_scatter_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional end, @ByVal(nullValue = "c10::SymInt(1)") SymInt step); -@Namespace("at") public static native @ByVal Tensor slice_scatter_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor src); - - -// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slice_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional end, @Cast("int64_t") long step/*=1*/); -@Namespace("at") public static native @ByRef Tensor slice_scatter_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src); +// #include -// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor slice_scatter_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal LongOptional start, @ByVal LongOptional end, @Cast("int64_t") long step, @ByRef Tensor out); +// aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_v(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slice_scatter_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim/*=0*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional start, @ByVal(nullValue = "c10::optional(c10::nullopt)") SymIntOptional end, @ByVal(nullValue = "c10::SymInt(1)") SymInt step); -@Namespace("at") public static native @ByRef Tensor slice_scatter_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor src); +// aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slice_scatter_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor src, @Cast("int64_t") long dim, @ByVal SymIntOptional start, @ByVal SymIntOptional end, @ByVal SymInt step, @ByRef Tensor out); +// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/slogdet.h +// Parsed from ATen/ops/special_chebyshev_polynomial_w.h // #pragma once @@ -62557,21 +48414,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) -@Namespace("at") public static native @ByVal TensorTensorTuple slogdet(@Const @ByRef Tensor self); +// aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer slogdet_out(@ByRef Tensor sign, @ByRef Tensor logabsdet, @Const @ByRef Tensor self); -// aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer slogdet_outf(@Const @ByRef Tensor self, @ByRef Tensor sign, @ByRef Tensor logabsdet); +// aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_w(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/slow_conv3d.h + + +// Parsed from ATen/ops/special_digamma.h // #pragma once @@ -62592,51 +48465,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
-// Parsed from ATen/ops/slow_conv3d.h
+
+
+// Parsed from ATen/ops/special_digamma.h

// #pragma once

@@ -62592,51 +48465,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/slow_conv3d_ops.h>
-
-
-// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef padding);
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... padding);
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
-
-
-// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef padding, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByRef Tensor out);
-
-
-// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding);
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding);
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
-
-
-// aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal SymIntRef padding, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByRef Tensor out);
-
-
-// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
-@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef padding);
-@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... padding);
-@Namespace("at") public static native @ByVal Tensor slow_conv3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
+// #include <ATen/ops/special_digamma_ops.h>


-// aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0) -> Tensor
-@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding);
-@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding);
-@Namespace("at") public static native @ByVal Tensor slow_conv3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
+// aten::special_digamma(Tensor self) -> Tensor
+@Namespace("at") public static native @ByVal Tensor special_digamma(@Const @ByRef Tensor self);
+
+// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor special_digamma_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor special_digamma_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
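As with every op in this file, the digamma binding comes in three forms: special_digamma allocates its result, special_digamma_out takes the destination tensor first (JavaCPP's rendering of C++ at::special_digamma_out(out, self)), and special_digamma_outf mirrors at::special_digamma_outf(self, out) with the destination last. A sketch of the three agreeing, under the same import assumptions as the previous sketch (empty_like and allclose assumed from the same global class):

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class DigammaSketch {
    public static void main(String[] args) {
        Tensor x = rand(3);
        Tensor fresh = special_digamma(x);    // allocating form
        Tensor out = empty_like(x);           // preallocated destination
        special_digamma_out(out, x);          // destination-first variant
        special_digamma_outf(x, out);         // destination-last variant, same result
        System.out.println(allclose(fresh, out)); // expected: true
    }
}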
-@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor output); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor output); - - -// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_out(@ByRef Tensor output, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding); - - -// aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding, *, Tensor(a!) output) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByRef Tensor output); -@Namespace("at") public static native @ByRef Tensor slow_conv3d_forward_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByRef Tensor output); - - -// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
padding); +// #include -// aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, SymInt[3] padding) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding); -@Namespace("at") public static native @ByVal Tensor slow_conv3d_forward_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding); +// aten::special_entr(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_entr(@Const @ByRef Tensor self); +// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_entr_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_entr_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/slow_conv_dilated2d.h +// Parsed from ATen/ops/special_erf.h // #pragma once @@ -62714,51 +48535,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); - - -// aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); - - -// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
kernel_size); - - -// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); - - -// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// #include -// aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_dilated2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +// aten::special_erf(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_erf(@Const @ByRef Tensor self); +// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_erf_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_erf_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/slow_conv_dilated3d.h +// Parsed from ATen/ops/special_erfc.h // #pragma once @@ -62779,51 +48570,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); - - -// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
-// Parsed from ATen/ops/slow_conv_dilated3d.h
+// Parsed from ATen/ops/special_erfc.h

// #pragma once

@@ -62779,51 +48570,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/slow_conv_dilated3d_ops.h>
-
-
-// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation);
-@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
-@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
-
-
-// aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation);
-@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
-@Namespace("at") public static native @ByVal Tensor slow_conv_dilated3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
-
-
-// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation);
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
-
-
-// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @ByRef Tensor out);
-
-
-// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation);
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
+// #include <ATen/ops/special_erfc_ops.h>

-// aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor slow_conv_dilated3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @ByRef Tensor out);
+// aten::special_erfc(Tensor self) -> Tensor
+@Namespace("at") public static native @ByVal Tensor special_erfc(@Const @ByRef Tensor self);
+
+// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor special_erfc_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor special_erfc_outf(@Const @ByRef Tensor self, @ByRef Tensor out);


-// Parsed from ATen/ops/slow_conv_transpose2d.h
+// Parsed from ATen/ops/special_erfcx.h

// #pragma once

@@ -62844,51 +48605,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/slow_conv_transpose2d_ops.h>
-
-
-// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation);
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
-
-
-// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @ByRef Tensor out);
-
-
-// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation);
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
-
-
-// aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal SymIntRef output_padding, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose2d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal SymIntRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dilation, @ByRef Tensor out);
-
-
-// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation);
-@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
-@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
+// #include <ATen/ops/special_erfcx_ops.h>

-// aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int[2] dilation=1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dilation);
-@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef kernel_size);
-@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional<at::Tensor>{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dilation);
-@Namespace("at") public static native @ByVal Tensor slow_conv_transpose2d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... kernel_size);
+// aten::special_erfcx(Tensor self) -> Tensor
+@Namespace("at") public static native @ByVal Tensor special_erfcx(@Const @ByRef Tensor self);
+
+// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor special_erfcx_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor special_erfcx_outf(@Const @ByRef Tensor self, @ByRef Tensor out);
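special_erfcx is the scaled complementary error function erfcx(x) = exp(x^2) * erfc(x), shipped as its own kernel because computing the right-hand side directly overflows for large x. For small x the identity can be checked against the erfc binding above (same assumptions as the earlier sketches):

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ErfcxSketch {
    public static void main(String[] args) {
        Tensor x = rand(4);  // small magnitudes, so exp(x*x) stays finite
        Tensor direct = special_erfcx(x);
        Tensor composed = exp(x.mul(x)).mul(special_erfc(x));
        System.out.println(allclose(direct, composed)); // expected: true
    }
}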
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::special_erfinv(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_erfinv(@Const @ByRef Tensor self); +// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_erfinv_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_erfinv_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); -// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// Parsed from ATen/ops/special_exp2.h -// aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal SymIntRef padding, @ByVal SymIntRef output_padding, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor slow_conv_transpose3d_symint_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal SymIntRef padding, @ByVal SymIntRef output_padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dilation, @ByRef Tensor out); +// #pragma once +// @generated by torchgen/gen.py from Function.h -// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, int[3] dilation=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef padding, @ByVal(nullValue = "c10::SymIntArrayRef(c10::SymInt(0))") SymIntRef output_padding, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); -@Namespace("at") public static native @ByVal Tensor slow_conv_transpose3d_symint(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// #include + + +// aten::special_exp2(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_exp2(@Const @ByRef Tensor self); + +// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_exp2_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_exp2_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/smm.h +// Parsed from ATen/ops/special_expit.h // #pragma once @@ -62974,16 +48710,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::smm(Tensor self, Tensor mat2) -> Tensor -@Namespace("at") public static native @ByVal Tensor smm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat2); +// aten::special_expit(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_expit(@Const @ByRef Tensor self); + +// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_expit_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_expit_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/smooth_l1_loss.h +// Parsed from ATen/ops/special_expm1.h // #pragma once @@ -63004,23 +48745,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!) 
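special_expit is the logistic sigmoid 1 / (1 + exp(-x)), so it should coincide with the existing sigmoid binding. A sketch under the same assumptions as the earlier examples:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ExpitSketch {
    public static void main(String[] args) {
        Tensor x = randn(6);
        System.out.println(allclose(special_expit(x), sigmoid(x))); // expected: true
    }
}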
-@Namespace("at") public static native @ByRef Tensor smooth_l1_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, double beta/*=1.0*/); -@Namespace("at") public static native @ByRef Tensor smooth_l1_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor smooth_l1_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double beta, @ByRef Tensor out); +// aten::special_expm1(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_expm1(@Const @ByRef Tensor self); -// aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor -@Namespace("at") public static native @ByVal Tensor smooth_l1_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/, double beta/*=1.0*/); -@Namespace("at") public static native @ByVal Tensor smooth_l1_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_expm1_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_expm1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/smooth_l1_loss_backward.h +// Parsed from ATen/ops/special_gammainc.h // #pragma once @@ -63041,21 +48780,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor smooth_l1_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double beta); -// aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor smooth_l1_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double beta, @ByRef Tensor grad_input); +// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_gammainc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_gammainc_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor -@Namespace("at") public static native @ByVal Tensor smooth_l1_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, double beta); +// aten::special_gammainc(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_gammainc(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/soft_margin_loss.h +// Parsed from ATen/ops/special_gammaincc.h // #pragma once @@ -63076,23 +48815,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor soft_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByRef Tensor soft_margin_loss_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor target); -// aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor soft_margin_loss_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor out); +// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_gammaincc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_gammaincc_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor -@Namespace("at") public static native @ByVal Tensor soft_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); -@Namespace("at") public static native @ByVal Tensor soft_margin_loss(@Const @ByRef Tensor self, @Const @ByRef Tensor target); +// aten::special_gammaincc(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_gammaincc(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/soft_margin_loss_backward.h +// Parsed from ATen/ops/special_gammaln.h // #pragma once @@ -63113,21 +48850,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor soft_margin_loss_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); -// aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor soft_margin_loss_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction, @ByRef Tensor grad_input); +// aten::special_gammaln(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_gammaln(@Const @ByRef Tensor self); -// aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor -@Namespace("at") public static native @ByVal Tensor soft_margin_loss_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Tensor target, @Cast("int64_t") long reduction); +// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_gammaln_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_gammaln_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/softmax.h +// Parsed from ATen/ops/special_hermite_polynomial_h.h // #pragma once @@ -63148,27 +48885,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_h(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor softmax_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor softmax_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_h(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor softmax(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor softmax(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_h(@Const @ByRef Tensor x, @Const @ByRef Scalar n); + +// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/softplus.h +// Parsed from ATen/ops/special_hermite_polynomial_he.h // #pragma once @@ -63189,23 +48936,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor softplus_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(20)") Scalar threshold); -@Namespace("at") public static native @ByRef Tensor softplus_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor softplus_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar beta, @Const @ByRef Scalar threshold, @ByRef Tensor out); +// aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_he(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor -@Namespace("at") public static native @ByVal Tensor softplus(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(20)") Scalar threshold); -@Namespace("at") public static native @ByVal Tensor softplus(@Const @ByRef Tensor self); +// aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_he(@Const @ByRef Scalar x, @Const @ByRef Tensor n); + +// aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_he(@Const @ByRef Tensor x, @Const @ByRef Scalar n); + +// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/softplus_backward.h + +// Parsed from ATen/ops/special_i0.h // #pragma once @@ -63226,21 +48987,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor softplus_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar beta, @Const @ByRef Scalar threshold); -// aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor softplus_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar beta, @Const @ByRef Scalar threshold, @ByRef Tensor grad_input); +// aten::special_i0(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_i0(@Const @ByRef Tensor self); -// aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor -@Namespace("at") public static native @ByVal Tensor softplus_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar beta, @Const @ByRef Scalar threshold); +// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_i0_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_i0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/softshrink.h +// Parsed from ATen/ops/special_i0e.h // #pragma once @@ -63261,23 +49022,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor softshrink_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.5)") Scalar lambd); -@Namespace("at") public static native @ByRef Tensor softshrink_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor softshrink_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar lambd, @ByRef Tensor out); +// aten::special_i0e(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_i0e(@Const @ByRef Tensor self); -// aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor -@Namespace("at") public static native @ByVal Tensor softshrink(@Const @ByRef Tensor self, @Const @ByRef(nullValue = "at::Scalar(0.5)") Scalar lambd); -@Namespace("at") public static native @ByVal Tensor softshrink(@Const @ByRef Tensor self); +// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_i0e_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_i0e_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/softshrink_backward.h +// Parsed from ATen/ops/special_i1.h // #pragma once @@ -63298,21 +49057,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor softshrink_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd); -// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor softshrink_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd, @ByRef Tensor grad_input); +// aten::special_i1(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_i1(@Const @ByRef Tensor self); -// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor -@Namespace("at") public static native @ByVal Tensor softshrink_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar lambd); +// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_i1_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_i1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/sort.h +// Parsed from ATen/ops/special_i1e.h // #pragma once @@ -63333,53 +49092,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self); -// aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean descending, @ByRef Tensor values, @ByRef Tensor indices); - -// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal BoolOptional stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal BoolOptional stable); -// aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_outf(@Const @ByRef Tensor self, @ByVal BoolOptional stable, @Cast("int64_t") long dim, @Cast("bool") boolean descending, @ByRef Tensor values, @ByRef Tensor indices); - -// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple sort(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple sort(@Const @ByRef Tensor self); - -// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple sort(@Const @ByRef Tensor self, @ByVal BoolOptional stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple sort(@Const @ByRef Tensor self, @ByVal BoolOptional stable); - -// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_outf(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean descending, @ByRef Tensor values, @ByRef Tensor indices); +// #include -// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal BoolOptional stable, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @ByVal BoolOptional stable, @ByVal Dimname dim); -// aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer sort_outf(@Const @ByRef Tensor self, @ByVal BoolOptional stable, @ByVal Dimname dim, @Cast("bool") boolean descending, @ByRef Tensor values, @ByRef Tensor indices); -// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple sort(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple sort(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::special_i1e(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_i1e(@Const @ByRef Tensor self); -// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) -@Namespace("at") public static native @ByVal TensorTensorTuple sort(@Const @ByRef Tensor self, @ByVal BoolOptional stable, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple sort(@Const @ByRef Tensor self, @ByVal BoolOptional stable, @ByVal Dimname dim); +// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_i1e_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_i1e_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/sparse_bsc_tensor.h +// Parsed from ATen/ops/special_laguerre_polynomial_l.h // #pragma once @@ -63400,25 +49127,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -// aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_laguerre_polynomial_l(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -// aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_laguerre_polynomial_l(@Const @ByRef Scalar x, @Const @ByRef Tensor n); + +// aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_laguerre_polynomial_l(@Const @ByRef Tensor x, @Const @ByRef Scalar n); + +// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/sparse_bsr_tensor.h + +// Parsed from ATen/ops/special_legendre_polynomial_p.h // #pragma once @@ -63439,25 +49178,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_legendre_polynomial_p(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_legendre_polynomial_p(@Const @ByRef Scalar x, @Const @ByRef Tensor n); + +// aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_legendre_polynomial_p(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/sparse_compressed_tensor.h + +// Parsed from ATen/ops/special_log1p.h // #pragma once @@ -63478,25 +49229,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -// aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::special_log1p(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_log1p(@Const @ByRef Tensor self); -// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -// aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_log1p_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_log1p_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/sparse_coo_tensor.h +// Parsed from ATen/ops/special_log_ndtr.h // #pragma once @@ -63517,42 +49264,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -// aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// #include -// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values); -// aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::special_log_ndtr(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_log_ndtr(@Const @ByRef Tensor self); -// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor sparse_coo_tensor_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_log_ndtr_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_log_ndtr_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/sparse_csc_tensor.h +// Parsed from ATen/ops/special_log_softmax.h // #pragma once @@ -63573,25 +49299,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor special_log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// Parsed from ATen/ops/sparse_csr_tensor.h +// Parsed from ATen/ops/special_logit.h // #pragma once @@ -63612,25 +49330,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -// aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::special_logit(Tensor self, float? eps=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_logit(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); +@Namespace("at") public static native @ByVal Tensor special_logit(@Const @ByRef Tensor self); -// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -// aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_logit_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); +@Namespace("at") public static native @ByRef Tensor special_logit_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) 
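special_logit is bound both with and without its optional eps. Passing eps clamps the input to [eps, 1 - eps] before computing log(p / (1 - p)), which avoids infinities at the endpoints. A sketch; the DoubleOptional(double) constructor is an assumption about the generated optional wrapper, not something shown in this hunk:

    // assumes: import static org.bytedeco.pytorch.global.torch.*;
    Tensor p = rand(5);                                      // values in [0, 1)
    Tensor z = special_logit(p);                             // eps = None: no clamping
    Tensor zc = special_logit(p, new DoubleOptional(1e-6));  // clamp to [1e-6, 1 - 1e-6] first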
+@Namespace("at") public static native @ByRef Tensor special_logit_outf(@Const @ByRef Tensor self, @ByVal DoubleOptional eps, @ByRef Tensor out); -// Parsed from ATen/ops/sparse_dim.h +// Parsed from ATen/ops/special_logsumexp.h // #pragma once @@ -63651,14 +49367,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_logsumexp_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor special_logsumexp_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// Parsed from ATen/ops/sparse_mask.h + +// Parsed from ATen/ops/special_modified_bessel_i0.h // #pragma once @@ -63679,18 +49409,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sparse_mask_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mask); -// aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sparse_mask_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mask, @ByRef Tensor out); +// aten::special_modified_bessel_i0(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_modified_bessel_i0(@Const @ByRef Tensor self); + +// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_modified_bessel_i0_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_modified_bessel_i0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/sparse_resize.h +// Parsed from ATen/ops/special_modified_bessel_i1.h // #pragma once @@ -63711,24 +49444,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -// aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); +// aten::special_modified_bessel_i1(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_modified_bessel_i1(@Const @ByRef Tensor self); -// aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_resize(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -@Namespace("at") public static native @ByVal Tensor sparse_resize(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_modified_bessel_i1_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_modified_bessel_i1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/sparse_resize_and_clear.h +// Parsed from ATen/ops/special_modified_bessel_k0.h // #pragma once @@ -63749,24 +49479,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_out(@Const @ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -// aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); -@Namespace("at") public static native @Const @ByRef Tensor sparse_resize_and_clear_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @Const @ByRef Tensor out); +// aten::special_modified_bessel_k0(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_modified_bessel_k0(@Const @ByRef Tensor self); -// aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_resize_and_clear(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); -@Namespace("at") public static native @ByVal Tensor sparse_resize_and_clear(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim); +// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_modified_bessel_k0_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_modified_bessel_k0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/sparse_sampled_addmm.h +// Parsed from ATen/ops/special_modified_bessel_k1.h // #pragma once @@ -63787,23 +49514,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sparse_sampled_addmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor sparse_sampled_addmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor sparse_sampled_addmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::special_modified_bessel_k1(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_modified_bessel_k1(@Const @ByRef Tensor self); -// aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor sparse_sampled_addmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor sparse_sampled_addmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); +// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_modified_bessel_k1_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_modified_bessel_k1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_airy_ai.h +// Parsed from ATen/ops/special_multigammaln.h // #pragma once @@ -63824,21 +49549,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_airy_ai(Tensor x) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_airy_ai(@Const @ByRef Tensor x); +// aten::special_multigammaln(Tensor self, int p) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_multigammaln(@Const @ByRef Tensor self, @Cast("int64_t") long p); -// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_airy_ai_out(@ByRef Tensor out, @Const @ByRef Tensor x); -// aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_airy_ai_outf(@Const @ByRef Tensor x, @ByRef Tensor out); +// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_multigammaln_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long p); +// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_multigammaln_outf(@Const @ByRef Tensor self, @Cast("int64_t") long p, @ByRef Tensor out); -// Parsed from ATen/ops/special_bessel_j0.h +// Parsed from ATen/ops/special_ndtr.h // #pragma once @@ -63859,21 +49584,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_bessel_j0(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_bessel_j0(@Const @ByRef Tensor self); +// aten::special_ndtr(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_ndtr(@Const @ByRef Tensor self); -// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_bessel_j0_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_bessel_j0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_ndtr_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_ndtr_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_bessel_j1.h +// Parsed from ATen/ops/special_ndtri.h // #pragma once @@ -63894,21 +49619,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_bessel_j1(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_bessel_j1(@Const @ByRef Tensor self); +// aten::special_ndtri(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_ndtri(@Const @ByRef Tensor self); -// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_bessel_j1_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_bessel_j1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_ndtri_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_ndtri_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_bessel_y0.h +// Parsed from ATen/ops/special_polygamma.h // #pragma once @@ -63929,21 +49654,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_bessel_y0(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_bessel_y0(@Const @ByRef Tensor self); +// aten::special_polygamma(int n, Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_polygamma(@Cast("int64_t") long n, @Const @ByRef Tensor self); -// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_bessel_y0_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_bessel_y0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_polygamma_out(@ByRef Tensor out, @Cast("int64_t") long n, @Const @ByRef Tensor self); +// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_polygamma_outf(@Cast("int64_t") long n, @Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_bessel_y1.h +// Parsed from ATen/ops/special_psi.h // #pragma once @@ -63964,21 +49689,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_bessel_y1(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_bessel_y1(@Const @ByRef Tensor self); +// aten::special_psi(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_psi(@Const @ByRef Tensor self); -// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_bessel_y1_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_bessel_y1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_psi_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_psi_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_chebyshev_polynomial_t.h +// Parsed from ATen/ops/special_round.h // #pragma once @@ -63999,37 +49724,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Tensor n); - -// aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_t(@Const @ByRef Scalar x, @Const @ByRef Tensor n); - -// aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// #include -// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::special_round(Tensor self, *, int decimals=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_round(@Const @ByRef Tensor self, @Cast("int64_t") long decimals/*=0*/); +@Namespace("at") public static native @ByVal Tensor special_round(@Const @ByRef Tensor self); -// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_t_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_round_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long decimals/*=0*/); +@Namespace("at") public static native @ByRef Tensor special_round_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_round_outf(@Const @ByRef Tensor self, @Cast("int64_t") long decimals, @ByRef Tensor out); -// Parsed from ATen/ops/special_chebyshev_polynomial_u.h +// Parsed from ATen/ops/special_scaled_modified_bessel_k0.h // #pragma once @@ -64050,37 +49761,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Tensor n); - -// aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_u(@Const @ByRef Scalar x, @Const @ByRef Tensor n); - -// aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// #include -// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_scaled_modified_bessel_k0(@Const @ByRef Tensor x); -// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_u_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_scaled_modified_bessel_k0_out(@ByRef Tensor out, @Const @ByRef Tensor x); +// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_scaled_modified_bessel_k0_outf(@Const @ByRef Tensor x, @ByRef Tensor out); -// Parsed from ATen/ops/special_chebyshev_polynomial_v.h +// Parsed from ATen/ops/special_scaled_modified_bessel_k1.h // #pragma once @@ -64101,37 +49796,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Tensor n); - -// aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_v(@Const @ByRef Scalar x, @Const @ByRef Tensor n); - -// aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// #include -// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_scaled_modified_bessel_k1(@Const @ByRef Tensor x); -// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_v_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_scaled_modified_bessel_k1_out(@ByRef Tensor out, @Const @ByRef Tensor x); +// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_scaled_modified_bessel_k1_outf(@Const @ByRef Tensor x, @ByRef Tensor out); -// Parsed from ATen/ops/special_chebyshev_polynomial_w.h +// Parsed from ATen/ops/special_shifted_chebyshev_polynomial_t.h // #pragma once @@ -64152,37 +49831,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_w(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_t(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_chebyshev_polynomial_w_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/special_digamma.h +// Parsed from ATen/ops/special_shifted_chebyshev_polynomial_u.h // #pragma once @@ -64203,21 +49882,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_digamma(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_digamma(@Const @ByRef Tensor self); +// aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_digamma_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_digamma_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_u(@Const @ByRef Scalar x, @Const @ByRef Tensor n); + +// aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Scalar n); + +// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/special_entr.h +// Parsed from ATen/ops/special_shifted_chebyshev_polynomial_v.h // #pragma once @@ -64238,21 +49933,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_entr(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_entr(@Const @ByRef Tensor self); +// aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_entr_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_entr_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_v(@Const @ByRef Scalar x, @Const @ByRef Tensor n); + +// aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Scalar n); + +// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/special_erf.h +// Parsed from ATen/ops/special_shifted_chebyshev_polynomial_w.h // #pragma once @@ -64273,21 +49984,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_erf(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_erf(@Const @ByRef Tensor self); +// aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_erf_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_erf_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_w(@Const @ByRef Scalar x, @Const @ByRef Tensor n); + +// aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Scalar n); + +// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); + +// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); -// Parsed from ATen/ops/special_erfc.h +// Parsed from ATen/ops/special_sinc.h // #pragma once @@ -64308,21 +50035,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_erfc(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_erfc(@Const @ByRef Tensor self); +// aten::special_sinc(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_sinc(@Const @ByRef Tensor self); -// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_erfc_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_erfc_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_sinc_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_sinc_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_erfcx.h +// Parsed from ATen/ops/special_softmax.h // #pragma once @@ -64343,21 +50070,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::special_erfcx(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_erfcx(@Const @ByRef Tensor self); -// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_erfcx_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_erfcx_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor special_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// Parsed from ATen/ops/special_erfinv.h +// Parsed from ATen/ops/special_spherical_bessel_j0.h // #pragma once @@ -64378,21 +50101,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_erfinv(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_erfinv(@Const @ByRef Tensor self); +// aten::special_spherical_bessel_j0(Tensor x) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_spherical_bessel_j0(@Const @ByRef Tensor x); -// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_erfinv_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_erfinv_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_spherical_bessel_j0_out(@ByRef Tensor out, @Const @ByRef Tensor x); +// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_spherical_bessel_j0_outf(@Const @ByRef Tensor x, @ByRef Tensor out); -// Parsed from ATen/ops/special_exp2.h +// Parsed from ATen/ops/special_xlog1py.h // #pragma once @@ -64413,21 +50136,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_exp2(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_exp2(@Const @ByRef Tensor self); +// aten::special_xlog1py(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_xlog1py(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_xlog1py(@Const @ByRef Scalar self, @Const @ByRef Tensor other); + +// aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_xlog1py(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlog1py_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlog1py_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlog1py_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlog1py_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_exp2_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_exp2_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlog1py_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlog1py_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// Parsed from ATen/ops/special_expit.h +// Parsed from ATen/ops/special_xlogy.h // #pragma once @@ -64448,21 +50187,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_expit(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_expit(@Const @ByRef Tensor self); +// aten::special_xlogy(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_xlogy(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_expit_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_expit_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_xlogy(@Const @ByRef Scalar self, @Const @ByRef Tensor other); + +// aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_xlogy(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlogy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlogy_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlogy_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlogy_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlogy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_xlogy_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// Parsed from ATen/ops/special_expm1.h +// Parsed from ATen/ops/special_zeta.h // #pragma once @@ -64483,21 +50238,37 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_expm1(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_expm1(@Const @ByRef Tensor self); +// aten::special_zeta(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_zeta(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_expm1_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_expm1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_zeta(@Const @ByRef Scalar self, @Const @ByRef Tensor other); + +// aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor special_zeta(@Const @ByRef Tensor self, @Const @ByRef Scalar other); + +// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor special_zeta_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_zeta_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_zeta_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_zeta_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); + +// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_zeta_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor special_zeta_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// Parsed from ATen/ops/special_gammainc.h +// Parsed from ATen/ops/split.h // #pragma once @@ -64518,21 +50289,35 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_gammainc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_gammainc_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); -// aten::special_gammainc(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_gammainc(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); +// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal LongArrayRef split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal LongArrayRef split_size); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_size); + + +// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_size); + -// Parsed from ATen/ops/special_gammaincc.h + + +// Parsed from ATen/ops/split_copy.h // #pragma once @@ -64553,21 +50338,41 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_gammaincc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_gammaincc_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); -// aten::special_gammaincc(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_gammaincc(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); +// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void split_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void split_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size); -// Parsed from ATen/ops/special_gammaln.h +// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void split_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); + + +// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void split_copy_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void split_copy_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size); + + +// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void split_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); + + + + + +// Parsed from ATen/ops/split_with_sizes.h // #pragma once @@ -64588,21 +50393,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_gammaln(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_gammaln(@Const @ByRef Tensor self); +// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef 
Tensor self, @ByVal LongArrayRef split_sizes); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes); -// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_gammaln_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_gammaln_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); -// Parsed from ATen/ops/special_hermite_polynomial_h.h + + +// Parsed from ATen/ops/split_with_sizes_copy.h // #pragma once @@ -64623,37 +50432,46 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_h(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
split_sizes); -// aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_h(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_h(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); -// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes); +@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes); -// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_h_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); + +// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void split_with_sizes_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +@Namespace("at") public static native void split_with_sizes_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); +// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void split_with_sizes_copy_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void split_with_sizes_copy_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes); -// Parsed from ATen/ops/special_hermite_polynomial_he.h +// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void split_with_sizes_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); + + + + + +// Parsed from ATen/ops/sqrt.h // #pragma once @@ -64674,37 +50492,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_he(@Const @ByRef Tensor x, @Const @ByRef Tensor n); - -// aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_he(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +// #include -// aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_hermite_polynomial_he(@Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::sqrt(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor sqrt(@Const @ByRef Tensor self); -// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::sqrt_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sqrt_(@ByRef Tensor self); -// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_hermite_polynomial_he_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sqrt_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sqrt_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_i0.h +// Parsed from ATen/ops/square.h // #pragma once @@ -64725,21 +50530,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_i0(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_i0(@Const @ByRef Tensor self); +// aten::square(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor square(@Const @ByRef Tensor self); -// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_i0_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_i0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::square_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor square_(@ByRef Tensor self); + +// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor square_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor square_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_i0e.h +// Parsed from ATen/ops/squeeze.h // #pragma once @@ -64760,21 +50568,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_i0e(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_i0e(@Const @ByRef Tensor self); +// aten::squeeze(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self); -// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_i0e_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_i0e_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @Cast("int64_t") long dim); + +// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// Parsed from ATen/ops/special_i1.h + +// Parsed from ATen/ops/squeeze_copy.h // #pragma once @@ -64795,21 +50608,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_i1(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_i1(@Const @ByRef Tensor self); +// aten::squeeze_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self); -// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_i1_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_i1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor +@Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim); + +// aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor +@Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); + +// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor out); + +// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef dim); +@Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) 
out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByRef Tensor out); -// Parsed from ATen/ops/special_i1e.h + +// Parsed from ATen/ops/sspaddmm.h // #pragma once @@ -64830,21 +50662,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_i1e(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_i1e(@Const @ByRef Tensor self); +// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor sspaddmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor sspaddmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_i1e_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_i1e_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sspaddmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor sspaddmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); +// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sspaddmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// Parsed from ATen/ops/special_laguerre_polynomial_l.h +// Parsed from ATen/ops/stack.h // #pragma once @@ -64865,37 +50699,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_laguerre_polynomial_l(@Const @ByRef Tensor x, @Const @ByRef Tensor n); - -// aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_laguerre_polynomial_l(@Const @ByRef Scalar x, @Const @ByRef Tensor n); - -// aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_laguerre_polynomial_l(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// #include -// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::stack(Tensor[] tensors, int dim=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor stack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByVal Tensor stack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_laguerre_polynomial_l_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor stack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @ByRef Tensor stack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor stack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); -// Parsed from ATen/ops/special_legendre_polynomial_p.h +// Parsed from ATen/ops/std.h // #pragma once @@ -64916,37 +50736,64 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_legendre_polynomial_p(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::std(Tensor self, bool unbiased=True) -> Tensor +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @Cast("bool") boolean unbiased); -// aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_legendre_polynomial_p(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +// aten::std.dim(Tensor self, int[1]? 
dim, bool unbiased=True, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); -// aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_legendre_polynomial_p(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); +// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_legendre_polynomial_p_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); + +// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); +// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// Parsed from ATen/ops/special_log1p.h + + +// Parsed from ATen/ops/std_mean.h // #pragma once @@ -64967,21 +50814,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_log1p(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_log1p(@Const @ByRef Tensor self); +// aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @Cast("bool") boolean unbiased); -// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_log1p_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_log1p_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); + +// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); + +// aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); + +// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); + +// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); +@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); -// Parsed from ATen/ops/special_log_ndtr.h +// Parsed from ATen/ops/stft.h // #pragma once @@ -65002,21 +50871,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_log_ndtr(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_log_ndtr(@Const @ByRef Tensor self); +// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor stft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft, @ByVal LongOptional hop_length, @ByVal LongOptional win_length, @Const @ByRef TensorOptional window, @Cast("bool") boolean normalized, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); -// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_log_ndtr_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_log_ndtr_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? 
return_complex=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor stft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @ByVal(nullValue = "c10::string_view(\"reflect\")") @Cast("c10::string_view*") Pointer pad_mode, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); -// Parsed from ATen/ops/special_log_softmax.h +// Parsed from ATen/ops/stride.h // #pragma once @@ -65037,17 +50904,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor special_log_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::stride.int(Tensor self, int dim) -> int +@Namespace("at") public static native @Cast("int64_t") long __dispatch_stride(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::stride.Dimname(Tensor self, Dimname dim) -> int +@Namespace("at") public static native @Cast("int64_t") long stride(@Const @ByRef Tensor self, @ByVal Dimname dim); -// Parsed from ATen/ops/special_logit.h + +// Parsed from ATen/ops/sub.h // #pragma once @@ -65068,23 +50937,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_logit(Tensor self, float? eps=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_logit(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); -@Namespace("at") public static native @ByVal Tensor special_logit(@Const @ByRef Tensor self); +// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor sub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sub_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_logit_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional eps); -@Namespace("at") public static native @ByRef Tensor special_logit_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) 
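Usage note: the int flavor of aten::stride is exposed under the name __dispatch_stride, mirroring the __dispatch_ prefix ATen itself uses in Functions.h for method-dispatched ops; the plain stride name is kept for the Dimname overload. A sketch, assuming rand(long...) and the Tensor.stride(long) member mapping:

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class StrideSketch {
    public static void main(String[] args) {
        Tensor x = rand(4, 6);             // contiguous, so strides are {6, 1}
        long s0 = __dispatch_stride(x, 0); // 6, free-function form
        long s1 = x.stride(1);             // 1, member form (assumed mapping)
    }
}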
-@Namespace("at") public static native @ByRef Tensor special_logit_outf(@Const @ByRef Tensor self, @ByVal DoubleOptional eps, @ByRef Tensor out); +// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor sub(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor sub(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor sub(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor sub(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor sub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sub_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// Parsed from ATen/ops/special_logsumexp.h + +// Parsed from ATen/ops/subtract.h // #pragma once @@ -65105,28 +50984,27 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor special_logsumexp(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor subtract_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByRef Tensor subtract_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor subtract_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor special_logsumexp_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_logsumexp_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor special_logsumexp_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor subtract(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor subtract(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor +@Namespace("at") public static native @ByVal Tensor subtract(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +@Namespace("at") public static native @ByVal Tensor subtract(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// Parsed from ATen/ops/special_modified_bessel_i0.h + +// Parsed from ATen/ops/sum.h // #pragma once @@ -65147,21 +51025,48 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_modified_bessel_i0(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_modified_bessel_i0(@Const @ByRef Tensor self); +// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self); + +// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); +@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + +// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); + +// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); +@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sum_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor sum_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_modified_bessel_i0_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_modified_bessel_i0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) 
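Usage note: sum shows the full overload fan-out of one schema: an optional dim list, keepdim, and an optional dtype wrapped as ScalarTypeOptional. A sketch, assuming rand(long...) and a value-taking ScalarTypeOptional constructor:

import org.bytedeco.pytorch.ScalarTypeOptional;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class SumSketch {
    public static void main(String[] args) {
        Tensor x = rand(2, 3);
        Tensor total = sum(x);    // reduce over every dimension
        Tensor byCol = sum(x, 0); // long... dim overload, reduce dim 0
        Tensor asF64 = sum(x, new ScalarTypeOptional(ScalarType.Double)); // accumulate as double
    }
}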
+@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sum_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); + +// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor sum_outf(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/special_modified_bessel_i1.h +// Parsed from ATen/ops/sum_to_size.h // #pragma once @@ -65182,21 +51087,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::special_modified_bessel_i1(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_modified_bessel_i1(@Const @ByRef Tensor self); -// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_modified_bessel_i1_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_modified_bessel_i1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_modified_bessel_k0.h +// Parsed from ATen/ops/svd.h // #pragma once @@ -65217,21 +51115,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_modified_bessel_k0(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_modified_bessel_k0(@Const @ByRef Tensor self); +// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor V, @Const @ByRef Tensor self, @Cast("bool") boolean some/*=true*/, @Cast("bool") boolean compute_uv/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor V, @Const @ByRef Tensor self); +// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T svd_outf(@Const @ByRef Tensor self, @Cast("bool") boolean some, @Cast("bool") boolean compute_uv, @ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor V); -// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_modified_bessel_k0_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_modified_bessel_k0_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T svd(@Const @ByRef Tensor self, @Cast("bool") boolean some/*=true*/, @Cast("bool") boolean compute_uv/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T svd(@Const @ByRef Tensor self); -// Parsed from ATen/ops/special_modified_bessel_k1.h +// Parsed from ATen/ops/swapaxes.h // #pragma once @@ -65252,21 +51152,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::special_modified_bessel_k1(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_modified_bessel_k1(@Const @ByRef Tensor self); -// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_modified_bessel_k1_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_modified_bessel_k1_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor swapaxes(@Const @ByRef Tensor self, @Cast("int64_t") long axis0, @Cast("int64_t") long axis1); -// Parsed from ATen/ops/special_multigammaln.h +// Parsed from ATen/ops/swapdims.h // #pragma once @@ -65287,21 +51182,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::special_multigammaln(Tensor self, int p) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_multigammaln(@Const @ByRef Tensor self, @Cast("int64_t") long p); -// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_multigammaln_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long p); -// aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_multigammaln_outf(@Const @ByRef Tensor self, @Cast("int64_t") long p, @ByRef Tensor out); +// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor swapdims(@Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1); -// Parsed from ATen/ops/special_ndtr.h +// Parsed from ATen/ops/t.h // #pragma once @@ -65322,21 +51212,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::special_ndtr(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_ndtr(@Const @ByRef Tensor self); -// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_ndtr_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_ndtr_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::t(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor t(@Const @ByRef Tensor self); -// Parsed from ATen/ops/special_ndtri.h +// Parsed from ATen/ops/t_copy.h // #pragma once @@ -65357,21 +51242,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_ndtri(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_ndtri(@Const @ByRef Tensor self); +// aten::t_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor t_copy(@Const @ByRef Tensor self); -// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_ndtri_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_ndtri_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor t_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor t_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_polygamma.h +// Parsed from ATen/ops/take.h // #pragma once @@ -65392,21 +51277,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_polygamma(int n, Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_polygamma(@Cast("int64_t") long n, @Const @ByRef Tensor self); +// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor take_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor index); +// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor take_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor index, @ByRef Tensor out); -// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_polygamma_out(@ByRef Tensor out, @Cast("int64_t") long n, @Const @ByRef Tensor self); -// aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_polygamma_outf(@Cast("int64_t") long n, @Const @ByRef Tensor self, @ByRef Tensor out); +// aten::take(Tensor self, Tensor index) -> Tensor +@Namespace("at") public static native @ByVal Tensor take(@Const @ByRef Tensor self, @Const @ByRef Tensor index); -// Parsed from ATen/ops/special_psi.h +// Parsed from ATen/ops/take_along_dim.h // #pragma once @@ -65427,21 +51312,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_psi(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_psi(@Const @ByRef Tensor self); +// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor take_along_dim_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); +@Namespace("at") public static native @ByRef Tensor take_along_dim_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices); +// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor take_along_dim_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongOptional dim, @ByRef Tensor out); -// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_psi_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_psi_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor take_along_dim(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); +@Namespace("at") public static native @ByVal Tensor take_along_dim(@Const @ByRef Tensor self, @Const @ByRef Tensor indices); -// Parsed from ATen/ops/special_round.h +// Parsed from ATen/ops/tan.h // #pragma once @@ -65462,23 +51349,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_round(Tensor self, *, int decimals=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_round(@Const @ByRef Tensor self, @Cast("int64_t") long decimals/*=0*/); -@Namespace("at") public static native @ByVal Tensor special_round(@Const @ByRef Tensor self); +// aten::tan(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor tan(@Const @ByRef Tensor self); -// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_round_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long decimals/*=0*/); -@Namespace("at") public static native @ByRef Tensor special_round_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_round_outf(@Const @ByRef Tensor self, @Cast("int64_t") long decimals, @ByRef Tensor out); +// aten::tan_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tan_(@ByRef Tensor self); +// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tan_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor tan_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_scaled_modified_bessel_k0.h + +// Parsed from ATen/ops/tanh.h // #pragma once @@ -65499,21 +51387,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_scaled_modified_bessel_k0(@Const @ByRef Tensor x); +// aten::tanh(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor tanh(@Const @ByRef Tensor self); -// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_scaled_modified_bessel_k0_out(@ByRef Tensor out, @Const @ByRef Tensor x); -// aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_scaled_modified_bessel_k0_outf(@Const @ByRef Tensor x, @ByRef Tensor out); +// aten::tanh_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tanh_(@ByRef Tensor self); +// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tanh_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tanh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/special_scaled_modified_bessel_k1.h + +// Parsed from ATen/ops/tanh_backward.h // #pragma once @@ -65534,21 +51425,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_scaled_modified_bessel_k1(@Const @ByRef Tensor x); +// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tanh_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output); +// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tanh_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @ByRef Tensor grad_input); -// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_scaled_modified_bessel_k1_out(@ByRef Tensor out, @Const @ByRef Tensor x); -// aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_scaled_modified_bessel_k1_outf(@Const @ByRef Tensor x, @ByRef Tensor out); +// aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor +@Namespace("at") public static native @ByVal Tensor tanh_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output); -// Parsed from ATen/ops/special_shifted_chebyshev_polynomial_t.h +// Parsed from ATen/ops/tensor_split.h // #pragma once @@ -65569,37 +51460,115 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Cast("int64_t") long sections, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Cast("int64_t") long sections); -// aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_t(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymInt sections, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymInt sections); -// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal LongArrayRef indices, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] indices, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); -// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_t_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef indices, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef indices); +// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor_indices_or_sections, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor_indices_or_sections); -// Parsed from ATen/ops/special_shifted_chebyshev_polynomial_u.h + + + +// Parsed from ATen/ops/tensor.h + +// #pragma once +// #include +// #include + +// These functions are defined in ATen/Utils.cpp. 
+// #define TENSOR(T, S)
+// TORCH_API Tensor tensor(ArrayRef<T> values, const TensorOptions& options);
+// inline Tensor tensor(
+// std::initializer_list<T> values, const TensorOptions& options) {
+// return at::tensor(ArrayRef<T>(values), options);
+// }
+// inline Tensor tensor(T value, const TensorOptions& options) {
+// return at::tensor(ArrayRef<T>(value), options);
+// }
+// inline Tensor tensor(ArrayRef<T> values) {
+// return at::tensor(std::move(values), at::dtype(k##S));
+// }
+// inline Tensor tensor(std::initializer_list<T> values) {
+// return at::tensor(ArrayRef<T>(values));
+// }
+// inline Tensor tensor(T value) {
+// return at::tensor(ArrayRef<T>(value));
+// }
+@Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values, @Const @ByRef TensorOptions options);
+ @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value, @Const @ByRef TensorOptions options);
+ @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ByteArrayRef values);
+ @Namespace("at") public static native @ByVal Tensor tensor(@Cast("uint8_t") byte value);
+ @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values, @Const @ByRef TensorOptions options);
+ @Namespace("at") public static native @ByVal Tensor tensor(short value, @Const @ByRef TensorOptions options);
+ @Namespace("at") public static native @ByVal Tensor tensor(@ByVal ShortArrayRef values);
+ @Namespace("at") public static native @ByVal Tensor tensor(short value);
+ @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values, @Const @ByRef TensorOptions options);
+ @Namespace("at") public static native @ByVal Tensor tensor(int value, @Const @ByRef TensorOptions options);
+ @Namespace("at") public static native @ByVal Tensor tensor(@ByVal IntArrayRef values);
+ @Namespace("at") public static native @ByVal Tensor tensor(int value);
+ @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values, @Const @ByRef TensorOptions options);
+ @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] values, @Const @ByRef TensorOptions options);
+ @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value, @Const @ByRef TensorOptions options);
+ @Namespace("at") public static native @ByVal Tensor tensor(@ByVal LongArrayRef values);
+ @Namespace("at") public static native @ByVal Tensor tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long...
values); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("int64_t") long value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(float value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(float value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(double value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(double value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BoolArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@Cast("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)") boolean value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal HalfArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal Half value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16ArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal BFloat16 value); +@Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplex value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplexArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal FloatComplex value); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexArrayRef values, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplex value, @Const @ByRef TensorOptions options); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplexArrayRef values); + @Namespace("at") public static native @ByVal Tensor tensor(@ByVal DoubleComplex value); +// #undef TENSOR + + // namespace at + + +// Parsed from ATen/ops/tensordot.h // #pragma once @@ -65620,37 +51589,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor 
special_shifted_chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Tensor n); - -// aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_u(@Const @ByRef Scalar x, @Const @ByRef Tensor n); - -// aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// #include -// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor +@Namespace("at") public static native @ByVal Tensor tensordot(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal LongArrayRef dims_self, @ByVal LongArrayRef dims_other); +@Namespace("at") public static native @ByVal Tensor tensordot(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims_other); -// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_u_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tensordot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal LongArrayRef dims_self, @ByVal LongArrayRef dims_other); +@Namespace("at") public static native @ByRef Tensor tensordot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims_other); +// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) 
out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tensordot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal LongArrayRef dims_self, @ByVal LongArrayRef dims_other, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor tensordot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_other, @ByRef Tensor out); -// Parsed from ATen/ops/special_shifted_chebyshev_polynomial_v.h +// Parsed from ATen/ops/thnn_conv2d.h // #pragma once @@ -65671,37 +51627,28 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Tensor n); - -// aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_v(@Const @ByRef Scalar x, @Const @ByRef Tensor n); - -// aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +// #include -// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); -// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) 
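
The at::tensor factory block a few hunks above maps each C++ scalar type to a Java primitive or ArrayRef wrapper, with the int64 case additionally bridged to `long...` varargs. A minimal sketch of the common calls, under the same `org.bytedeco.pytorch.global.torch` static-import assumption as before:

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class TensorFactorySketch {
    public static void main(String[] args) {
        Tensor a = tensor(1L, 2L, 3L); // 1-D int64 tensor from the long... overload
        Tensor s = tensor(0.5);        // 0-D float64 scalar
        Tensor b = tensor(true);       // 0-D bool scalar
    }
}
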
+@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal LongArrayRef stride, @ByVal LongArrayRef padding, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor thnn_conv2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); -// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_v_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") LongArrayRef padding); +@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal LongArrayRef kernel_size); +@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); +@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// Parsed from ATen/ops/special_shifted_chebyshev_polynomial_w.h +// Parsed from ATen/ops/threshold.h // #pragma once @@ -65722,37 +51669,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Tensor n); - -// aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_w(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +// #include -// aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_shifted_chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Tensor n); -// aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_outf(@Const @ByRef Tensor x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor +@Namespace("at") public static native @ByVal Tensor threshold(@Const @ByRef Tensor self, @Const @ByRef Scalar threshold, @Const @ByRef Scalar value); -// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Scalar x, @Const @ByRef Tensor n); -// aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_outf(@Const @ByRef Scalar x, @Const @ByRef Tensor n, @ByRef Tensor out); +// aten::threshold_(Tensor(a!) 
self, Scalar threshold, Scalar value) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor threshold_(@ByRef Tensor self, @Const @ByRef Scalar threshold, @Const @ByRef Scalar value); -// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_out(@ByRef Tensor out, @Const @ByRef Tensor x, @Const @ByRef Scalar n); -// aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_shifted_chebyshev_polynomial_w_outf(@Const @ByRef Tensor x, @Const @ByRef Scalar n, @ByRef Tensor out); +// aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor threshold_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold, @Const @ByRef Scalar value); +// aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor threshold_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar threshold, @Const @ByRef Scalar value, @ByRef Tensor out); -// Parsed from ATen/ops/special_sinc.h +// Parsed from ATen/ops/threshold_backward.h // #pragma once @@ -65773,21 +51707,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_sinc(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_sinc(@Const @ByRef Tensor self); +// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor threshold_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold); +// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor threshold_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold, @ByRef Tensor grad_input); -// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_sinc_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_sinc_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor +@Namespace("at") public static native @ByVal Tensor threshold_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold); -// Parsed from ATen/ops/special_softmax.h +// Parsed from ATen/ops/tile.h // #pragma once @@ -65808,17 +51742,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::special_softmax(Tensor self, int dim, ScalarType? 
dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor special_softmax(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::tile(Tensor self, int[] dims) -> Tensor +@Namespace("at") public static native @ByVal Tensor tile(@Const @ByRef Tensor self, @ByVal LongArrayRef dims); +@Namespace("at") public static native @ByVal Tensor tile(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -// Parsed from ATen/ops/special_spherical_bessel_j0.h +// Parsed from ATen/ops/to.h // #pragma once @@ -65839,21 +51773,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::special_spherical_bessel_j0(Tensor x) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_spherical_bessel_j0(@Const @ByRef Tensor x); -// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_spherical_bessel_j0_out(@ByRef Tensor out, @Const @ByRef Tensor x); -// aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_spherical_bessel_j0_outf(@Const @ByRef Tensor x, @ByRef Tensor out); -// Parsed from ATen/ops/special_xlog1py.h +// Parsed from ATen/ops/to_dense.h // #pragma once @@ -65874,37 +51801,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_xlog1py(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_xlog1py(@Const @ByRef Tensor self, @Const @ByRef Tensor other); - -// aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_xlog1py(@Const @ByRef Scalar self, @Const @ByRef Tensor other); - -// aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_xlog1py(@Const @ByRef Tensor self, @Const @ByRef Scalar other); - -// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_xlog1py_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_xlog1py_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// #include -// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_xlog1py_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_xlog1py_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
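
The `int[] dims` parameter of tile above shows the standard ArrayRef bridging: one overload takes a `LongArrayRef`, the other Java `long...` varargs. A short sketch of the varargs form:

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class TileSketch {
    public static void main(String[] args) {
        Tensor base = tensor(1L, 2L);   // shape [2]
        Tensor row  = tile(base, 3);    // dims = [3] -> shape [6]
        Tensor grid = tile(base, 2, 2); // dims = [2, 2] -> shape [2, 4]
    }
}
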
-@Namespace("at") public static native @ByRef Tensor special_xlog1py_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_xlog1py_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// Parsed from ATen/ops/special_xlogy.h +// Parsed from ATen/ops/to_dense_backward.h // #pragma once @@ -65925,37 +51829,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_xlogy(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_xlogy(@Const @ByRef Tensor self, @Const @ByRef Tensor other); - -// aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_xlogy(@Const @ByRef Scalar self, @Const @ByRef Tensor other); - -// aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_xlogy(@Const @ByRef Tensor self, @Const @ByRef Scalar other); - -// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_xlogy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_xlogy_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// #include -// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_xlogy_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_xlogy_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_xlogy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor special_xlogy_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor +@Namespace("at") public static native @ByVal Tensor to_dense_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input); -// Parsed from ATen/ops/special_zeta.h +// Parsed from ATen/ops/to_mkldnn.h // #pragma once @@ -65976,37 +51859,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::special_zeta(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_zeta(@Const @ByRef Tensor self, @Const @ByRef Tensor other); - -// aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_zeta(@Const @ByRef Scalar self, @Const @ByRef Tensor other); - -// aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor special_zeta(@Const @ByRef Tensor self, @Const @ByRef Scalar other); - -// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_zeta_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_zeta_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// #include -// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_zeta_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_zeta_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_zeta_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor special_zeta_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); +// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_mkldnn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); +@Namespace("at") public static native @ByRef Tensor to_mkldnn_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor to_mkldnn_outf(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// Parsed from ATen/ops/split.h +// Parsed from ATen/ops/to_mkldnn_backward.h // #pragma once @@ -66027,35 +51892,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); - - -// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); - - -// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_size); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
split_size); - +// #include -// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymIntRef split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_symint(@Const @ByRef Tensor self, @ByVal SymIntRef split_size); +// aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor +@Namespace("at") public static native @ByVal Tensor to_mkldnn_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input); -// Parsed from ATen/ops/split_copy.h +// Parsed from ATen/ops/to_padded_tensor.h // #pragma once @@ -66076,41 +51922,34 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); - +// #include -// aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_copy_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); -// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void split_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size); +// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_padded_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, double padding, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional output_size); +@Namespace("at") public static native @ByRef Tensor to_padded_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, double padding); +@Namespace("at") public static native @ByRef Tensor to_padded_tensor_out(@ByRef Tensor out, @Const @ByRef Tensor self, double padding, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); +// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!) 
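
Each SymInt-typed schema above is emitted twice: a plain `int64_t` entry point and a `*_symint` twin taking `SymInt` or `SymIntArrayRef`. A hedged sketch of the two split spellings, assuming a `SymInt(long)` constructor and the `rand(long...)` factory overload:

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class SplitSketch {
    public static void main(String[] args) {
        Tensor t = rand(6);                                   // assumed rand(long...) overload
        TensorVector chunks = split(t, 2);                    // three chunks of size 2
        TensorVector same   = split_symint(t, new SymInt(2)); // same call via the SymInt twin (assumed ctor)
        Tensor first = chunks.get(0);                         // TensorVector wraps std::vector<Tensor>
    }
}
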
+@Namespace("at") public static native @ByRef Tensor to_padded_tensor_outf(@Const @ByRef Tensor self, double padding, @ByVal LongArrayRefOptional output_size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor to_padded_tensor_outf(@Const @ByRef Tensor self, double padding, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByRef Tensor out); -// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_copy_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void split_copy_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size); +// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_padded_tensor_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, double padding, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional output_size); +@Namespace("at") public static native @ByRef Tensor to_padded_tensor_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, double padding); -// aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); +// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_padded_tensor_symint_outf(@Const @ByRef Tensor self, double padding, @ByVal SymIntArrayRefOptional output_size, @ByRef Tensor out); -// Parsed from ATen/ops/split_with_sizes.h +// Parsed from ATen/ops/to_sparse.h // #pragma once @@ -66131,25 +51970,26 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
split_sizes); +// #include -// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntRef split_sizes); +// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long sparse_dim); +// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_sparse_outf(@Const @ByRef Tensor self, @Cast("int64_t") long sparse_dim, @ByRef Tensor out); +// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); +@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); +// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor to_sparse_outf(@Const @ByRef Tensor self, @ByVal LayoutOptional layout, @ByVal LongArrayRefOptional blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor to_sparse_outf(@Const @ByRef Tensor self, @ByVal LayoutOptional layout, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); -// Parsed from ATen/ops/split_with_sizes_copy.h +// Parsed from ATen/ops/to_sparse_bsc.h // #pragma once @@ -66170,46 +52010,22 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes); - - -// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector split_with_sizes_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntRef split_sizes); - - -// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes); -@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void split_with_sizes_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
split_sizes); - - -// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_with_sizes_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); -@Namespace("at") public static native void split_with_sizes_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); - - -// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_with_sizes_copy_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void split_with_sizes_copy_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntRef split_sizes); - +// #include -// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void split_with_sizes_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); +// aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); +@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef blocksize); +@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); +@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize); +// aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); -// Parsed from ATen/ops/sqrt.h +// Parsed from ATen/ops/to_sparse_bsr.h // #pragma once @@ -66230,24 +52046,22 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::sqrt(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor sqrt(@Const @ByRef Tensor self); +// #include -// aten::sqrt_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sqrt_(@ByRef Tensor self); -// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sqrt_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor sqrt_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); +@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef blocksize); +@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); +@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize); +// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); -// Parsed from ATen/ops/square.h +// Parsed from ATen/ops/to_sparse_csc.h // #pragma once @@ -66268,24 +52082,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::square(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor square(@Const @ByRef Tensor self); +// #include -// aten::square_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor square_(@ByRef Tensor self); -// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor square_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor square_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_sparse_csc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); +@Namespace("at") public static native @ByRef Tensor to_sparse_csc_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor to_sparse_csc_outf(@Const @ByRef Tensor self, @ByVal LongOptional dense_dim, @ByRef Tensor out); -// Parsed from ATen/ops/squeeze.h +// Parsed from ATen/ops/to_sparse_csr.h // #pragma once @@ -66306,26 +52115,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::squeeze(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self); - -// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// #include -// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @ByVal Dimname dim); -// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor squeeze(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_sparse_csr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); +@Namespace("at") public static native @ByRef Tensor to_sparse_csr_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor to_sparse_csr_outf(@Const @ByRef Tensor self, @ByVal LongOptional dense_dim, @ByRef Tensor out); -// Parsed from ATen/ops/squeeze_copy.h +// Parsed from ATen/ops/topk.h // #pragma once @@ -66346,40 +52148,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::squeeze_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self); - -// aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim); - -// aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByVal Tensor squeeze_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); +// #include -// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor out); +// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T topk_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T topk_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k); +// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T topk_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim, @Cast("bool") boolean largest, @Cast("bool") boolean sorted, @ByRef Tensor values, @ByRef Tensor indices); -// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim); -@Namespace("at") public static native @ByRef Tensor squeeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor squeeze_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByRef Tensor out); +// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) +@Namespace("at") public static native @ByVal T_TensorTensor_T topk(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T topk(@Const @ByRef Tensor self, @Cast("int64_t") long k); -// Parsed from ATen/ops/sspaddmm.h +// Parsed from ATen/ops/trace.h // #pragma once @@ -66400,23 +52185,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor sspaddmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor sspaddmm(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); +// aten::trace(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor trace(@Const @ByRef Tensor self); -// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor sspaddmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar beta, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor sspaddmm_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2); -// aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sspaddmm_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor mat1, @Const @ByRef Tensor mat2, @Const @ByRef Scalar beta, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor trace_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor trace_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/stack.h +// Parsed from ATen/ops/trace_backward.h // #pragma once @@ -66437,23 +52220,22 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::stack(Tensor[] tensors, int dim=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor stack(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByVal Tensor stack(@ByVal TensorArrayRef tensors); +// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor +@Namespace("at") public static native @ByVal Tensor trace_backward(@Const @ByRef Tensor grad, @ByVal LongArrayRef sizes); +@Namespace("at") public static native @ByVal Tensor trace_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); -// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor stack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @ByRef Tensor stack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor stack_outf(@ByVal TensorArrayRef tensors, @Cast("int64_t") long dim, @ByRef Tensor out); +// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor +@Namespace("at") public static native @ByVal Tensor trace_backward_symint(@Const @ByRef Tensor grad, @ByVal SymIntArrayRef sizes); -// Parsed from ATen/ops/std.h + + +// Parsed from ATen/ops/transpose.h // #pragma once @@ -66474,64 +52256,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::std(Tensor self, bool unbiased=True) -> Tensor -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @Cast("bool") boolean unbiased); - -// aten::std.dim(Tensor self, int[1]? 
dim, bool unbiased=True, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); - -// aten::std.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); - -// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); -// aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); - -// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -// aten::std.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); - -// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); +// #include -// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); -// aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor transpose(@Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1); -// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor std_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor std_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor transpose(@Const @ByRef Tensor self, @ByVal Dimname dim0, @ByVal Dimname dim1); -// Parsed from ATen/ops/std_mean.h +// Parsed from ATen/ops/transpose_copy.h // #pragma once @@ -66552,43 +52289,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @Cast("bool") boolean unbiased); - -// aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); - -// aten::std_mean.correction(Tensor self, int[1]? dim=None, *, int? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// #include -// aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); -// aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple std_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor +@Namespace("at") public static native @ByVal Tensor transpose_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1); -// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer std_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -// aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer std_mean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer std_mean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); +// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor transpose_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1); +// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor transpose_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1, @ByRef Tensor out); -// Parsed from ATen/ops/stft.h +// Parsed from ATen/ops/trapezoid.h // #pragma once @@ -66609,19 +52324,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor stft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft, @ByVal LongOptional hop_length, @ByVal LongOptional win_length, @Const @ByRef TensorOptional window, @Cast("bool") boolean normalized, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); +// aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor trapezoid(@Const @ByRef Tensor y, @Const @ByRef Tensor x, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByVal Tensor trapezoid(@Const @ByRef Tensor y, @Const @ByRef Tensor x); -// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? 
return_complex=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor stft(@Const @ByRef Tensor self, @Cast("int64_t") long n_fft, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional hop_length, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional win_length, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional window, @Cast("bool") boolean center/*=true*/, @ByVal(nullValue = "c10::string_view(\"reflect\")") @Cast("c10::string_view*") Pointer pad_mode, @Cast("bool") boolean normalized/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional onesided, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional return_complex); +// aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor trapezoid(@Const @ByRef Tensor y, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar dx, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByVal Tensor trapezoid(@Const @ByRef Tensor y); -// Parsed from ATen/ops/stride.h +// Parsed from ATen/ops/trapz.h // #pragma once @@ -66642,19 +52359,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::stride.int(Tensor self, int dim) -> int -@Namespace("at") public static native @Cast("int64_t") long __dispatch_stride(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor trapz(@Const @ByRef Tensor y, @Const @ByRef Tensor x, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByVal Tensor trapz(@Const @ByRef Tensor y, @Const @ByRef Tensor x); -// aten::stride.Dimname(Tensor self, Dimname dim) -> int -@Namespace("at") public static native @Cast("int64_t") long stride(@Const @ByRef Tensor self, @ByVal Dimname dim); +// aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor +@Namespace("at") public static native @ByVal Tensor trapz(@Const @ByRef Tensor y, double dx/*=1*/, @Cast("int64_t") long dim/*=-1*/); +@Namespace("at") public static native @ByVal Tensor trapz(@Const @ByRef Tensor y); -// Parsed from ATen/ops/sub.h +// Parsed from ATen/ops/triangular_solve.h // #pragma once @@ -66675,33 +52394,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor sub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
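A sketch for the `trapz`/`trapezoid` bindings above (same imports and assumptions as the `topk` sketch); the values are arbitrary and only exercise the call shapes.

Tensor x = rand(5);                     // sample positions (illustration only)
Tensor y = rand(5);                     // integrand values at those positions
Tensor a1 = trapz(y, x);                // aten::trapz.x, integrates along dim=-1
Tensor a2 = trapezoid(y);               // aten::trapezoid.dx with the dx=1 default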
-@Namespace("at") public static native @ByRef Tensor sub_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// #include -// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor sub(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor sub(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor sub(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByVal Tensor sub(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient) +@Namespace("at") public static native @ByVal T_TensorTensor_T triangular_solve_out(@ByRef Tensor X, @ByRef Tensor M, @Const @ByRef Tensor self, @Const @ByRef Tensor A, @Cast("bool") boolean upper/*=true*/, @Cast("bool") boolean transpose/*=false*/, @Cast("bool") boolean unitriangular/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T triangular_solve_out(@ByRef Tensor X, @ByRef Tensor M, @Const @ByRef Tensor self, @Const @ByRef Tensor A); +// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient) +@Namespace("at") public static native @ByVal T_TensorTensor_T triangular_solve_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor A, @Cast("bool") boolean upper, @Cast("bool") boolean transpose, @Cast("bool") boolean unitriangular, @ByRef Tensor X, @ByRef Tensor M); -// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor sub_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sub_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef Scalar alpha, @ByRef Tensor out); +// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) +@Namespace("at") public static native @ByVal T_TensorTensor_T triangular_solve(@Const @ByRef Tensor self, @Const @ByRef Tensor A, @Cast("bool") boolean upper/*=true*/, @Cast("bool") boolean transpose/*=false*/, @Cast("bool") boolean unitriangular/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T triangular_solve(@Const @ByRef Tensor self, @Const @ByRef Tensor A); -// Parsed from ATen/ops/subtract.h +// Parsed from ATen/ops/tril.h // #pragma once @@ -66722,25 +52431,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor subtract_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); -@Namespace("at") public static native @ByRef Tensor subtract_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor subtract_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef Scalar alpha, @ByRef Tensor out); -// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor subtract(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tril_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/); +@Namespace("at") public static native @ByRef Tensor tril_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tril_outf(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal, @ByRef Tensor out); -// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor -@Namespace("at") public static native @ByVal Tensor subtract(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar alpha); +// aten::tril(Tensor self, int diagonal=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor tril(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/); +@Namespace("at") public static native @ByVal Tensor tril(@Const @ByRef Tensor self); -// Parsed from ATen/ops/sum.h +// Parsed from ATen/ops/tril_indices.h // #pragma once @@ -66761,48 +52468,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self); - -// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); -@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); - -// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByVal Tensor sum(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// #include -// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim); -@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); -// aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sum_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor sum_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); -// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sum_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean keepdim, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor tril_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor tril_indices(@Cast("int64_t") long row, @Cast("int64_t") long col); +// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor tril_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype); -@Namespace("at") public static native @ByRef Tensor sum_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor sum_outf(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out); +// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tril_indices_out(@ByRef Tensor out, @Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/); +@Namespace("at") public static native @ByRef Tensor tril_indices_out(@ByRef Tensor out, @Cast("int64_t") long row, @Cast("int64_t") long col); +// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor tril_indices_outf(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset, @ByRef Tensor out); -// Parsed from ATen/ops/sum_to_size.h +// Parsed from ATen/ops/triplet_margin_loss.h // #pragma once @@ -66823,14 +52507,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include +// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor +@Namespace("at") public static native @ByVal Tensor triplet_margin_loss(@Const @ByRef Tensor anchor, @Const @ByRef Tensor positive, @Const @ByRef Tensor negative, double margin/*=1.0*/, double p/*=2*/, double eps/*=1e-06*/, @Cast("bool") boolean swap/*=false*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/); +@Namespace("at") public static native @ByVal Tensor triplet_margin_loss(@Const @ByRef Tensor anchor, @Const @ByRef Tensor positive, @Const @ByRef Tensor negative); -// Parsed from ATen/ops/svd.h +// Parsed from ATen/ops/triu.h // #pragma once @@ -66851,23 +52538,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor V, @Const @ByRef Tensor self, @Cast("bool") boolean some/*=true*/, @Cast("bool") boolean compute_uv/*=true*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer svd_out(@ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor V, @Const @ByRef Tensor self); -// aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) 
V) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer svd_outf(@Const @ByRef Tensor self, @Cast("bool") boolean some, @Cast("bool") boolean compute_uv, @ByRef Tensor U, @ByRef Tensor S, @ByRef Tensor V); +// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor triu_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/); +@Namespace("at") public static native @ByRef Tensor triu_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor triu_outf(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal, @ByRef Tensor out); -// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple svd(@Const @ByRef Tensor self, @Cast("bool") boolean some/*=true*/, @Cast("bool") boolean compute_uv/*=true*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple svd(@Const @ByRef Tensor self); +// aten::triu(Tensor self, int diagonal=0) -> Tensor +@Namespace("at") public static native @ByVal Tensor triu(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/); +@Namespace("at") public static native @ByVal Tensor triu(@Const @ByRef Tensor self); -// Parsed from ATen/ops/swapaxes.h +// Parsed from ATen/ops/triu_indices.h // #pragma once @@ -66888,16 +52575,25 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor swapaxes(@Const @ByRef Tensor self, @Cast("int64_t") long axis0, @Cast("int64_t") long axis1); +// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col); +// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); + +// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor triu_indices_out(@ByRef Tensor out, @Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/); +@Namespace("at") public static native @ByRef Tensor triu_indices_out(@ByRef Tensor out, @Cast("int64_t") long row, @Cast("int64_t") long col); +// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!) 
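A sketch for the triangle helpers above (same imports and assumptions as the `topk` sketch).

Tensor a = rand(4, 4);
Tensor lower = tril(a);                 // main diagonal and below; the rest zeroed
Tensor strictUpper = triu(a, 1);        // diagonal=1 keeps only entries above the diagonal
Tensor idx = tril_indices(4, 4);        // 2 x N coordinate tensor, kLong dtype by default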
+@Namespace("at") public static native @ByRef Tensor triu_indices_outf(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset, @ByRef Tensor out); -// Parsed from ATen/ops/swapdims.h +// Parsed from ATen/ops/true_divide.h // #pragma once @@ -66918,16 +52614,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor swapdims(@Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1); +// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor true_divide(@Const @ByRef Tensor self, @Const @ByRef Tensor other); + +// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor true_divide_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor true_divide_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor true_divide(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// Parsed from ATen/ops/t.h + +// Parsed from ATen/ops/trunc.h // #pragma once @@ -66948,16 +52652,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::t(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor t(@Const @ByRef Tensor self); +// aten::trunc(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor trunc(@Const @ByRef Tensor self); + +// aten::trunc_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor trunc_(@ByRef Tensor self); + +// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor trunc_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor trunc_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/t_copy.h +// Parsed from ATen/ops/type_as.h // #pragma once @@ -66978,21 +52690,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::t_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor t_copy(@Const @ByRef Tensor self); -// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor t_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor t_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/take.h +// Parsed from ATen/ops/unbind.h // #pragma once @@ -67013,21 +52718,20 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor take_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor index); -// aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor take_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor index, @ByRef Tensor out); +// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Const @ByRef Tensor self); -// aten::take(Tensor self, Tensor index) -> Tensor -@Namespace("at") public static native @ByVal Tensor take(@Const @ByRef Tensor self, @Const @ByRef Tensor index); +// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Const @ByRef Tensor self, @ByVal Dimname dim); -// Parsed from ATen/ops/take_along_dim.h +// Parsed from ATen/ops/unbind_copy.h // #pragma once @@ -67048,23 +52752,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor take_along_dim_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); -@Namespace("at") public static native @ByRef Tensor take_along_dim_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor indices); -// aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor take_along_dim_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal LongOptional dim, @ByRef Tensor out); +// aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind_copy(@Const @ByRef Tensor self); -// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor take_along_dim(@Const @ByRef Tensor self, @Const @ByRef Tensor indices, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); -@Namespace("at") public static native @ByVal Tensor take_along_dim(@Const @ByRef Tensor self, @Const @ByRef Tensor indices); +// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void unbind_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); +@Namespace("at") public static native void unbind_copy_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self); +// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () +@Namespace("at") public static native void unbind_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out); -// Parsed from ATen/ops/tan.h +// Parsed from ATen/ops/unflatten.h // #pragma once @@ -67085,24 +52789,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::tan(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor tan(@Const @ByRef Tensor self); +// #include -// aten::tan_(Tensor(a!) self) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor tan_(@ByRef Tensor self); -// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor tan_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor tan_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal LongArrayRef sizes); +@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + +// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal LongArrayRef sizes, @ByVal DimnameArrayRef names); +@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal DimnameArrayRef names); -// Parsed from ATen/ops/tanh.h +// Parsed from ATen/ops/unflatten_dense_tensors.h // #pragma once @@ -67123,24 +52824,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::tanh(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor tanh(@Const @ByRef Tensor self); +// #include -// aten::tanh_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor tanh_(@ByRef Tensor self); -// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor tanh_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor tanh_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unflatten_dense_tensors(@Const @ByRef Tensor flat, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// Parsed from ATen/ops/tanh_backward.h +// Parsed from ATen/ops/unfold.h // #pragma once @@ -67161,21 +52854,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor tanh_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor output); -// aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor tanh_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output, @ByRef Tensor grad_input); -// aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor -@Namespace("at") public static native @ByVal Tensor tanh_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor output); -// Parsed from ATen/ops/tensor_split.h +// Parsed from ATen/ops/unfold_backward.h // #pragma once @@ -67196,39 +52882,40 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Cast("int64_t") long sections, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Cast("int64_t") long sections); +// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor +@Namespace("at") public static native @ByVal Tensor unfold_backward(@Const @ByRef Tensor grad_in, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); +@Namespace("at") public static native @ByVal Tensor unfold_backward(@Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); -// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymInt sections, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymInt sections); +// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor +@Namespace("at") public static native @ByVal Tensor unfold_backward_symint(@Const @ByRef Tensor grad_in, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); -// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef indices, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] indices, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); +// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor unfold_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_in, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); +@Namespace("at") public static native @ByRef Tensor unfold_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); -// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymIntRef indices, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split_symint(@Const @ByRef Tensor self, @ByVal SymIntRef indices); +// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor unfold_backward_outf(@Const @ByRef Tensor grad_in, @ByVal LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor unfold_backward_outf(@Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out); -// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor_indices_or_sections, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector tensor_split(@Const @ByRef Tensor self, @Const @ByRef Tensor tensor_indices_or_sections); +// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor unfold_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad_in, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); + + +// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor unfold_backward_symint_outf(@Const @ByRef Tensor grad_in, @ByVal SymIntArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out); -// Parsed from ATen/ops/tensordot.h + +// Parsed from ATen/ops/unfold_copy.h // #pragma once @@ -67249,24 +52936,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor -@Namespace("at") public static native @ByVal Tensor tensordot(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims_self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims_other); -@Namespace("at") public static native @ByVal Tensor tensordot(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims_other); +// aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor +@Namespace("at") public static native @ByVal Tensor unfold_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dimension, @Cast("int64_t") long size, @Cast("int64_t") long step); -// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor tensordot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims_self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims_other); -@Namespace("at") public static native @ByRef Tensor tensordot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims_other); -// aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor tensordot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims_self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef dims_other, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor tensordot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dims_other, @ByRef Tensor out); +// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor unfold_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dimension, @Cast("int64_t") long size, @Cast("int64_t") long step); +// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor unfold_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dimension, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out); -// Parsed from ATen/ops/thnn_conv2d.h +// Parsed from ATen/ops/uniform.h // #pragma once @@ -67287,28 +52971,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -@Namespace("at") public static native @ByRef Tensor thnn_conv2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); -// aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor thnn_conv2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor thnn_conv2d_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef TensorOptional bias, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, @ByRef Tensor out); +// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor uniform_out(@ByRef Tensor out, @Const @ByRef Tensor self, double from/*=0*/, double to/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByRef Tensor uniform_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor uniform_outf(@Const @ByRef Tensor self, double from, double to, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast("c10::ArrayRef*") LongArrayRef padding); -@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast("c10::ArrayRef*") LongArrayRef kernel_size); -@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, @Const @ByRef(nullValue = "c10::optional{}") TensorOptional bias, @ByVal(nullValue = "at::IntArrayRef(1)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::IntArrayRef(0)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... padding); -@Namespace("at") public static native @ByVal Tensor thnn_conv2d(@Const @ByRef Tensor self, @Const @ByRef Tensor weight, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... kernel_size); +// aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor uniform(@Const @ByRef Tensor self, double from/*=0*/, double to/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); +@Namespace("at") public static native @ByVal Tensor uniform(@Const @ByRef Tensor self); -// Parsed from ATen/ops/threshold.h +// Parsed from ATen/ops/unique_consecutive.h // #pragma once @@ -67329,24 +53008,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor -@Namespace("at") public static native @ByVal Tensor threshold(@Const @ByRef Tensor self, @Const @ByRef Scalar threshold, @Const @ByRef Scalar value); -// aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor threshold_(@ByRef Tensor self, @Const @ByRef Scalar threshold, @Const @ByRef Scalar value); +// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_consecutive(@Const @ByRef Tensor self, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_consecutive(@Const @ByRef Tensor self); -// aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor threshold_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold, @Const @ByRef Scalar value); -// aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor threshold_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar threshold, @Const @ByRef Scalar value, @ByRef Tensor out); +// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_consecutive_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_consecutive_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self); +// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_consecutive_outf(@Const @ByRef Tensor self, @Cast("bool") boolean return_inverse, @Cast("bool") boolean return_counts, @ByVal LongOptional dim, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/threshold_backward.h +// Parsed from ATen/ops/unique_dim.h // #pragma once @@ -67367,21 +53045,23 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor threshold_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold); -// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor threshold_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold, @ByRef Tensor grad_input); +// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean sorted/*=true*/, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor -@Namespace("at") public static native @ByVal Tensor threshold_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold); +// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
-// Parsed from ATen/ops/threshold_backward.h
+// Parsed from ATen/ops/unique_dim.h

// #pragma once

@@ -67367,21 +53045,23 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/threshold_backward_ops.h>
+// #include <ATen/ops/unique_dim_ops.h>

-// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor threshold_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold);
-// aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor threshold_backward_outf(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold, @ByRef Tensor grad_input);
+// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean sorted/*=true*/, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim(@Const @ByRef Tensor self, @Cast("int64_t") long dim);

-// aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
-@Namespace("at") public static native @ByVal Tensor threshold_backward(@Const @ByRef Tensor grad_output, @Const @ByRef Tensor self, @Const @ByRef Scalar threshold);
+// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean sorted/*=true*/, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("int64_t") long dim);
+// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean sorted, @Cast("bool") boolean return_inverse, @Cast("bool") boolean return_counts, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);

-// Parsed from ATen/ops/tile.h
+// Parsed from ATen/ops/unique_dim_consecutive.h

// #pragma once

@@ -67402,17 +53082,23 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/tile_ops.h>
+// #include <ATen/ops/unique_dim_consecutive_ops.h>

-// aten::tile(Tensor self, int[] dims) -> Tensor
-@Namespace("at") public static native @ByVal Tensor tile(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef dims);
-@Namespace("at") public static native @ByVal Tensor tile(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... dims);
+// aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim_consecutive(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim_consecutive(@Const @ByRef Tensor self, @Cast("int64_t") long dim);
+// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim_consecutive_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/);
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim_consecutive_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("int64_t") long dim);
+// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+@Namespace("at") public static native @ByVal T_TensorTensorTensor_T unique_dim_consecutive_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean return_inverse, @Cast("bool") boolean return_counts, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2);

-// Parsed from ATen/ops/to.h
+
+// Parsed from ATen/ops/unsafe_chunk.h

// #pragma once

@@ -67433,14 +53119,17 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/to_ops.h>
+// #include <ATen/ops/unsafe_chunk_ops.h>

+// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks);

-// Parsed from ATen/ops/to_dense.h
+// Parsed from ATen/ops/unsafe_split.h

// #pragma once

@@ -67461,14 +53150,41 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/to_dense_ops.h>
+// #include <ATen/ops/unsafe_split_ops.h>

+// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size);
+// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size);

-// Parsed from ATen/ops/to_dense_backward.h
+// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+@Namespace("at") public static native void unsafe_split_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native void unsafe_split_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size);
+
+
+// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+@Namespace("at") public static native void unsafe_split_outf(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out);
+
+
+// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+@Namespace("at") public static native void unsafe_split_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native void unsafe_split_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size);
+
+
+// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+@Namespace("at") public static native void unsafe_split_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out);
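unsafe_split returns its pieces as a TensorVector (std::vector&lt;at::Tensor&gt;); unlike split, the outputs share storage with the input without the usual autograd safeguards, which is what the "unsafe" prefix signals. A minimal iteration sketch (illustrative; ones(...) and the shapes are assumed):

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

Tensor x = ones(6, 4);
TensorVector parts = unsafe_split(x, 2);   // three [2, 4] pieces along dim 0 (the default)
for (long i = 0; i < parts.size(); i++) {
    Tensor part = parts.get(i);            // still backed by x's storage
}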
+
+
+
+
+
+// Parsed from ATen/ops/unsafe_split_with_sizes.h

// #pragma once

@@ -67489,16 +53205,46 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/to_dense_backward_ops.h>
+// #include <ATen/ops/unsafe_split_with_sizes_ops.h>

-// aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor
-@Namespace("at") public static native @ByVal Tensor to_dense_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input);
+// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes);
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... split_sizes);
+
+
+// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes);
+
+
+// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes);
+@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... split_sizes);
+
+
+// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+@Namespace("at") public static native void unsafe_split_with_sizes_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out);
+@Namespace("at") public static native void unsafe_split_with_sizes_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out);
+
+
+// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+@Namespace("at") public static native void unsafe_split_with_sizes_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/);
+@Namespace("at") public static native void unsafe_split_with_sizes_symint_out(@ByVal @Cast("at::TensorList*") TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes);
+
+
+// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+@Namespace("at") public static native void unsafe_split_with_sizes_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal @Cast("at::TensorList*") TensorArrayRef out);

-// Parsed from ATen/ops/to_mkldnn.h
+
+// Parsed from ATen/ops/unsqueeze.h

// #pragma once

@@ -67519,19 +53265,16 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/to_mkldnn_ops.h>
+// #include <ATen/ops/unsqueeze_ops.h>

-// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor to_mkldnn_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional<at::ScalarType>(c10::nullopt)") ScalarTypeOptional dtype);
-@Namespace("at") public static native @ByRef Tensor to_mkldnn_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor to_mkldnn_outf(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);
+// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
+@Namespace("at") public static native @ByVal Tensor unsqueeze(@Const @ByRef Tensor self, @Cast("int64_t") long dim);

-// Parsed from ATen/ops/to_mkldnn_backward.h
+// Parsed from ATen/ops/unsqueeze_copy.h

// #pragma once

@@ -67552,16 +53295,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/to_mkldnn_backward_ops.h>
+// #include <ATen/ops/unsqueeze_copy_ops.h>

-// aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor
-@Namespace("at") public static native @ByVal Tensor to_mkldnn_backward(@Const @ByRef Tensor grad, @Const @ByRef Tensor input);
+// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor
+@Namespace("at") public static native @ByVal Tensor unsqueeze_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim);
+// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor unsqueeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim);
+// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor unsqueeze_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); -// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_padded_tensor_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, double padding, @ByVal(nullValue = "at::OptionalSymIntArrayRef(c10::nullopt)") SymIntArrayRefOptional output_size); -@Namespace("at") public static native @ByRef Tensor to_padded_tensor_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, double padding); + +// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -// aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_padded_tensor_symint_outf(@Const @ByRef Tensor self, double padding, @ByVal SymIntArrayRefOptional output_size, @ByRef Tensor out); +// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners); + + +// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); + + +// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners); -// Parsed from ATen/ops/to_sparse.h + + +// Parsed from ATen/ops/upsample_bicubic2d_backward.h // #pragma once @@ -67630,26 +53399,46 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long sparse_dim); -// aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor to_sparse_outf(@Const @ByRef Tensor self, @Cast("int64_t") long sparse_dim, @ByRef Tensor out); +// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); -// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor to_sparse_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LayoutOptional layout, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -// aten::to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor to_sparse_outf(@Const @ByRef Tensor self, @ByVal LayoutOptional layout, @ByVal LongArrayRefOptional blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor to_sparse_outf(@Const @ByRef Tensor self, @ByVal LayoutOptional layout, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); + +// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners); -// Parsed from ATen/ops/to_sparse_bsc.h +// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); + + +// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); + + +// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners); + + + + + +// Parsed from ATen/ops/upsample_bilinear2d.h // #pragma once @@ -67670,58 +53459,55 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef blocksize); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize); -// aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsc_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); +// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -// Parsed from ATen/ops/to_sparse_bsr.h +// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); -// #pragma once -// @generated by torchgen/gen.py from Function.h +// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners); -// #include +// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef blocksize); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... blocksize); -// aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor to_sparse_bsr_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] blocksize, @ByVal LongOptional dense_dim, @ByRef Tensor out); +// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); + + +// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners); -// Parsed from ATen/ops/to_sparse_csc.h + +// Parsed from ATen/ops/upsample_bilinear2d_backward.h // #pragma once @@ -67742,52 +53528,46 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_csc_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_csc_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::to_sparse_csc.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_csc_outf(@Const @ByRef Tensor self, @ByVal LongOptional dense_dim, @ByRef Tensor out); +// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -// Parsed from ATen/ops/to_sparse_csr.h -// #pragma once +// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners); -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); -// #include +// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners); -// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_csr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dense_dim); -@Namespace("at") public static native @ByRef Tensor to_sparse_csr_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor to_sparse_csr_outf(@Const @ByRef Tensor self, @ByVal LongOptional dense_dim, @ByRef Tensor out); -// Parsed from ATen/ops/topk.h +// Parsed from ATen/ops/upsample_linear1d.h // #pragma once @@ -67808,58 +53588,55 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer topk_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer topk_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k); -// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer topk_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim, @Cast("bool") boolean largest, @Cast("bool") boolean sorted, @ByRef Tensor values, @ByRef Tensor indices); +// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
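Note for reviewers: every refactored op above comes as an overload family, with `LongArrayRef` and `long[]`/`long...` variants standing in for C++ `IntArrayRef`, plus trailing-default overloads that drop the optional `scales_*` arguments. A minimal usage sketch against these bindings; the class name, `rand(long...)` factory, and all shapes are illustrative assumptions, not part of this patch:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class UpsampleBilinearSketch {
        public static void main(String[] args) {
            // Assumed 1x3x8x8 NCHW input, upsampled to 16x16.
            Tensor input = rand(1, 3, 8, 8);
            // long[] overload declared above; scales_h/scales_w keep
            // their c10::nullopt defaults.
            Tensor output = upsample_bilinear2d(input, new long[]{16, 16}, /*align_corners=*/false);
            System.out.println("dims: " + output.dim()); // expected: 4
        }
    }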
-// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor to_sparse_csr_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional dense_dim);
-@Namespace("at") public static native @ByRef Tensor to_sparse_csr_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::to_sparse_csr.out(Tensor self, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor to_sparse_csr_outf(@Const @ByRef Tensor self, @ByVal LongOptional dense_dim, @ByRef Tensor out);

-// Parsed from ATen/ops/topk.h
+// Parsed from ATen/ops/upsample_linear1d.h

// #pragma once

@@ -67808,58 +53588,55 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/topk_ops.h>
+// #include <ATen/ops/upsample_linear1d_ops.h>

-// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer topk_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer topk_out(@ByRef Tensor values, @ByRef Tensor indices, @Const @ByRef Tensor self, @Cast("int64_t") long k);
-// aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer topk_outf(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim, @Cast("bool") boolean largest, @Cast("bool") boolean sorted, @ByRef Tensor values, @ByRef Tensor indices);
+// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors);
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors);

-// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)
-@Namespace("at") public static native @ByVal TensorTensorTuple topk(@Const @ByRef Tensor self, @Cast("int64_t") long k, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean largest/*=true*/, @Cast("bool") boolean sorted/*=true*/);
-@Namespace("at") public static native @ByVal TensorTensorTuple topk(@Const @ByRef Tensor self, @Cast("int64_t") long k);
+// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors);
+// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners);
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners);

-// Parsed from ATen/ops/trace.h
-// #pragma once
+// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor out);

-// @generated by torchgen/gen.py from Function.h
-// #include <ATen/Context.h>
-// #include <ATen/DeviceGuard.h>
-// #include <ATen/TensorUtils.h>
-// #include <ATen/TracerMode.h>
-// #include <ATen/core/Generator.h>
-// #include <ATen/core/Reduction.h>
-// #include <ATen/core/Tensor.h>
-// #include <c10/core/Scalar.h>
-// #include <c10/core/Storage.h>
-// #include <c10/core/TensorOptions.h>
-// #include <c10/util/Deprecated.h>
-// #include <c10/util/Optional.h>
+// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners);
+
+// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor out);

-// #include <ATen/ops/trace_ops.h>
+// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners);
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners);

-// aten::trace(Tensor self) -> Tensor
-@Namespace("at") public static native @ByVal Tensor trace(@Const @ByRef Tensor self);
+// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners);

-// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor trace_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor trace_outf(@Const @ByRef Tensor self, @ByRef Tensor out);

-// Parsed from ATen/ops/trace_backward.h
+// Parsed from ATen/ops/upsample_linear1d_backward.h

// #pragma once

@@ -67880,22 +53657,46 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/trace_backward_ops.h>
+// #include <ATen/ops/upsample_linear1d_backward_ops.h>

-// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
-@Namespace("at") public static native @ByVal Tensor trace_backward(@Const @ByRef Tensor grad, @ByVal @Cast("c10::ArrayRef<int64_t>*") LongArrayRef sizes);
-@Namespace("at") public static native @ByVal Tensor trace_backward(@Const @ByRef Tensor grad, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... sizes);
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners);
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners);

-// aten::trace_backward(Tensor grad, SymInt[] sizes) -> Tensor
-@Namespace("at") public static native @ByVal Tensor trace_backward_symint(@Const @ByRef Tensor grad, @ByVal SymIntRef sizes);
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor grad_input);
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor grad_input);
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners);
+
+// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor grad_input);

-// Parsed from ATen/ops/transpose.h
+// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners);
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners);
+
+
+// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners);
+
+
+
+
+
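As elsewhere in this refactoring, the `_out` variants put the destination tensor first and allow trailing optionals to be omitted, while the `_outf` variants mirror torchgen's final signature with every argument explicit and the destination last. A sketch of the destination-first form; the `empty(long...)` factory, the resize-on-write behavior, and the shapes are assumptions for illustration only:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class UpsampleLinearOutSketch {
        public static void main(String[] args) {
            Tensor self = rand(1, 2, 16);  // assumed example input
            Tensor out = empty(0);         // destination; assumed to be resized by the kernel
            // Destination-first overload; the optional scales argument is dropped.
            upsample_linear1d_out(out, self, new long[]{32}, /*align_corners=*/true);
            // The _outf twin would take an explicit DoubleOptional and `out` last.
            System.out.println("dims: " + out.dim()); // expected: 3
        }
    }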
+// Parsed from ATen/ops/upsample_nearest1d.h

// #pragma once

@@ -67916,54 +53717,55 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/transpose_ops.h>
+// #include <ATen/ops/upsample_nearest1d_ops.h>

-// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
-@Namespace("at") public static native @ByVal Tensor transpose(@Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1);
+// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors);

-// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
-@Namespace("at") public static native @ByVal Tensor transpose(@Const @ByRef Tensor self, @ByVal Dimname dim0, @ByVal Dimname dim1);
+// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors);
+// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... output_size);

-// Parsed from ATen/ops/transpose_copy.h
-// #pragma once
+// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal DoubleOptional scales, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales, @ByRef Tensor out);

-// @generated by torchgen/gen.py from Function.h
-// #include <ATen/Context.h>
-// #include <ATen/DeviceGuard.h>
-// #include <ATen/TensorUtils.h>
-// #include <ATen/TracerMode.h>
-// #include <ATen/core/Generator.h>
-// #include <ATen/core/Reduction.h>
-// #include <ATen/core/Tensor.h>
-// #include <c10/core/Scalar.h>
-// #include <c10/core/Storage.h>
-// #include <c10/core/TensorOptions.h>
-// #include <c10/util/Deprecated.h>
-// #include <c10/util/Optional.h>
+// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size);
+// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal DoubleOptional scales, @ByRef Tensor out);

-// #include <ATen/ops/transpose_copy_ops.h>
+// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... output_size);

-// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor transpose_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1);
-// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor transpose_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1);
-// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor transpose_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim0, @Cast("int64_t") long dim1, @ByRef Tensor out);
+// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size);

-// Parsed from ATen/ops/trapezoid.h
+
+// Parsed from ATen/ops/upsample_nearest1d_backward.h

// #pragma once

@@ -67984,21 +53786,46 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/trapezoid_ops.h>
+// #include <ATen/ops/upsample_nearest1d_backward_ops.h>

-// aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor trapezoid(@Const @ByRef Tensor y, @Const @ByRef Tensor x, @Cast("int64_t") long dim/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor trapezoid(@Const @ByRef Tensor y, @Const @ByRef Tensor x);
+// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... input_size);
+
+
+// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input);
+
+
+// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size);
+
+
+// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input);
+
+
+// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... input_size);
+
+
+// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size);
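The `*_symint` overloads added throughout take `SymIntArrayRef` and exist for symbolic-shape callers; with concrete sizes the plain overloads suffice, including the `long...` convenience form. A minimal sketch (the class name, `rand(long...)` factory, and shapes are illustrative assumptions):

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class UpsampleNearest1dSketch {
        public static void main(String[] args) {
            Tensor x = rand(1, 4, 10);            // assumed NCL example input
            // long... overload declared above; scales keeps its nullopt default.
            Tensor y = upsample_nearest1d(x, 20);
            System.out.println("dims: " + y.dim()); // expected: 3
        }
    }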
-// aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor trapezoid(@Const @ByRef Tensor y, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar dx, @Cast("int64_t") long dim/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor trapezoid(@Const @ByRef Tensor y);

-// Parsed from ATen/ops/trapz.h
+// Parsed from ATen/ops/upsample_nearest2d.h

// #pragma once

@@ -68019,58 +53846,55 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/trapz_ops.h>
+// #include <ATen/ops/upsample_nearest2d_ops.h>

-// aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor trapz(@Const @ByRef Tensor y, @Const @ByRef Tensor x, @Cast("int64_t") long dim/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor trapz(@Const @ByRef Tensor y, @Const @ByRef Tensor x);
+// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors);

-// aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
-@Namespace("at") public static native @ByVal Tensor trapz(@Const @ByRef Tensor y, double dx/*=1*/, @Cast("int64_t") long dim/*=-1*/);
-@Namespace("at") public static native @ByVal Tensor trapz(@Const @ByRef Tensor y);
+// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors);
+// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... output_size);

-// Parsed from ATen/ops/triangular_solve.h
-// #pragma once
+// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out);

-// @generated by torchgen/gen.py from Function.h
-// #include <ATen/Context.h>
-// #include <ATen/DeviceGuard.h>
-// #include <ATen/TensorUtils.h>
-// #include <ATen/TracerMode.h>
-// #include <ATen/core/Generator.h>
-// #include <ATen/core/Reduction.h>
-// #include <ATen/core/Tensor.h>
-// #include <c10/core/Scalar.h>
-// #include <c10/core/Storage.h>
-// #include <c10/core/TensorOptions.h>
-// #include <c10/util/Deprecated.h>
-// #include <c10/util/Optional.h>
+// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size);
+// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out);

-// #include <ATen/ops/triangular_solve_ops.h>
+// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... output_size);

-// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer triangular_solve_out(@ByRef Tensor X, @ByRef Tensor M, @Const @ByRef Tensor self, @Const @ByRef Tensor A, @Cast("bool") boolean upper/*=true*/, @Cast("bool") boolean transpose/*=false*/, @Cast("bool") boolean unitriangular/*=false*/);
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer triangular_solve_out(@ByRef Tensor X, @ByRef Tensor M, @Const @ByRef Tensor self, @Const @ByRef Tensor A);
-// aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
-@Namespace("at") public static native @ByVal @Cast("std::tuple<at::Tensor&,at::Tensor&>*") PointerPointer triangular_solve_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor A, @Cast("bool") boolean upper, @Cast("bool") boolean transpose, @Cast("bool") boolean unitriangular, @ByRef Tensor X, @ByRef Tensor M);
-// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
-@Namespace("at") public static native @ByVal TensorTensorTuple triangular_solve(@Const @ByRef Tensor self, @Const @ByRef Tensor A, @Cast("bool") boolean upper/*=true*/, @Cast("bool") boolean transpose/*=false*/, @Cast("bool") boolean unitriangular/*=false*/);
-@Namespace("at") public static native @ByVal TensorTensorTuple triangular_solve(@Const @ByRef Tensor self, @Const @ByRef Tensor A);
+// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size);

-// Parsed from ATen/ops/tril.h
+
+// Parsed from ATen/ops/upsample_nearest2d_backward.h

// #pragma once

@@ -68091,23 +53915,46 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/tril_ops.h>
+// #include <ATen/ops/upsample_nearest2d_backward_ops.h>

-// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor tril_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/);
-@Namespace("at") public static native @ByRef Tensor tril_out(@ByRef Tensor out, @Const @ByRef Tensor self);
-// aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor tril_outf(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal, @ByRef Tensor out);
+// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... input_size);

-// aten::tril(Tensor self, int diagonal=0) -> Tensor
-@Namespace("at") public static native @ByVal Tensor tril(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/);
-@Namespace("at") public static native @ByVal Tensor tril(@Const @ByRef Tensor self);
+
+// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input);
+// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size);

-// Parsed from ATen/ops/tril_indices.h
+// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input);
+
+
+// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... input_size);
+
+
+// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size);
+
+
+
+
+
+// Parsed from ATen/ops/upsample_nearest3d.h

// #pragma once

@@ -68128,56 +53975,55 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include <ATen/ops/tril_indices_ops.h>
+// #include <ATen/ops/upsample_nearest3d_ops.h>

-// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor tril_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options);
-@Namespace("at") public static native @ByVal Tensor tril_indices(@Cast("int64_t") long row, @Cast("int64_t") long col);
-// aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor tril_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors);

-// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor tril_indices_out(@ByRef Tensor out, @Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/);
-@Namespace("at") public static native @ByRef Tensor tril_indices_out(@ByRef Tensor out, @Cast("int64_t") long row, @Cast("int64_t") long col);
-// aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor tril_indices_outf(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset, @ByRef Tensor out);
+// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors);
+// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... output_size);

-// Parsed from ATen/ops/triplet_margin_loss.h
-// #pragma once
+// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out);

-// @generated by torchgen/gen.py from Function.h
-// #include <ATen/Context.h>
-// #include <ATen/DeviceGuard.h>
-// #include <ATen/TensorUtils.h>
-// #include <ATen/TracerMode.h>
-// #include <ATen/core/Generator.h>
-// #include <ATen/core/Reduction.h>
-// #include <ATen/core/Tensor.h>
-// #include <c10/core/Scalar.h>
-// #include <c10/core/Storage.h>
-// #include <c10/core/TensorOptions.h>
-// #include <c10/util/Deprecated.h>
-// #include <c10/util/Optional.h>
+// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size);
+
+// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out);

-// #include <ATen/ops/triplet_margin_loss_ops.h>
+// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long... output_size);

-// aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
-@Namespace("at") public static native @ByVal Tensor triplet_margin_loss(@Const @ByRef Tensor anchor, @Const @ByRef Tensor positive, @Const @ByRef Tensor negative, double margin/*=1.0*/, double p/*=2*/, double eps/*=1e-06*/, @Cast("bool") boolean swap/*=false*/, @Cast("int64_t") long reduction/*=at::Reduction::Mean*/);
-@Namespace("at") public static native @ByVal Tensor triplet_margin_loss(@Const @ByRef Tensor anchor, @Const @ByRef Tensor positive, @Const @ByRef Tensor negative);
+// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional<double>(c10::nullopt)") DoubleOptional scales_w);
+@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size);
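The `.vec` entry points (taking `LongArrayRefOptional`/`DoubleArrayRefOptional`) let callers pass either an explicit output size or scale factors, matching the `SymInt[]?`/`float[]?` schema; with a concrete size the plain `long...` overload is the simplest entry point. A sketch under the same assumptions as the earlier examples (class name, `rand(long...)`, and shapes are illustrative):

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class UpsampleNearest3dSketch {
        public static void main(String[] args) {
            Tensor v = rand(1, 2, 4, 4, 4);            // assumed NCDHW example input
            // Concrete sizes go through the long... overload; the .vec and
            // *_symint forms cover optional/symbolic sizes instead.
            Tensor u = upsample_nearest3d(v, 8, 8, 8);
            System.out.println("dims: " + u.dim()); // expected: 5
        }
    }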
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); -// aten::triu(Tensor self, int diagonal=0) -> Tensor -@Namespace("at") public static native @ByVal Tensor triu(@Const @ByRef Tensor self, @Cast("int64_t") long diagonal/*=0*/); -@Namespace("at") public static native @ByVal Tensor triu(@Const @ByRef Tensor self); + +// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size); -// Parsed from ATen/ops/triu_indices.h +// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); + + +// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size); +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); + + +// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size); + + + + + +// Parsed from ATen/ops/upsample_trilinear3d.h // #pragma once @@ -68235,63 +54104,55 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col); -// aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor triu_indices_out(@ByRef Tensor out, @Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/); -@Namespace("at") public static native @ByRef Tensor triu_indices_out(@ByRef Tensor out, @Cast("int64_t") long row, @Cast("int64_t") long col); -// aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor triu_indices_outf(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset, @ByRef Tensor out); +// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? 
scales_w=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); -// Parsed from ATen/ops/true_divide.h -// #pragma once +// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners); +// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -// #include +// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal LongArrayRef output_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); -// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor true_divide(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor true_divide_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor true_divide_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef output_size, @Cast("bool") boolean align_corners); -// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor true_divide(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// Parsed from ATen/ops/trunc.h +// Parsed from ATen/ops/upsample_trilinear3d_backward.h // #pragma once @@ -68312,52 +54173,46 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::trunc(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor trunc(@Const @ByRef Tensor self); - -// aten::trunc_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor trunc_(@ByRef Tensor self); +// #include -// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor trunc_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor trunc_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -// Parsed from ATen/ops/type_as.h -// #pragma once +// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners); -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal LongArrayRef output_size, @ByVal LongArrayRef input_size, @Cast("bool") boolean align_corners); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); -// #include +// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); +@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntArrayRef output_size, @ByVal SymIntArrayRef input_size, @Cast("bool") boolean align_corners); -// Parsed from ATen/ops/unbind.h +// Parsed from ATen/ops/value_selecting_reduction_backward.h // #pragma once @@ -68378,20 +54233,22 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Const @ByRef Tensor self); +// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor +@Namespace("at") public static native @ByVal Tensor value_selecting_reduction_backward(@Const @ByRef Tensor grad, @Cast("int64_t") long dim, @Const @ByRef Tensor indices, @ByVal LongArrayRef sizes, @Cast("bool") boolean keepdim); +@Namespace("at") public static native @ByVal Tensor value_selecting_reduction_backward(@Const @ByRef Tensor grad, @Cast("int64_t") long dim, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @Cast("bool") boolean keepdim); -// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind(@Const @ByRef Tensor self, @ByVal Dimname dim); + +// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor +@Namespace("at") public static native @ByVal Tensor value_selecting_reduction_backward_symint(@Const @ByRef Tensor grad, @Cast("int64_t") long dim, @Const @ByRef Tensor indices, @ByVal SymIntArrayRef sizes, @Cast("bool") boolean keepdim); -// Parsed from ATen/ops/unbind_copy.h + +// Parsed from ATen/ops/values.h // #pragma once @@ -68412,23 +54269,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unbind_copy(@Const @ByRef Tensor self); -// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unbind_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unbind_copy_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self); -// aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unbind_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long 
dim, @ByVal TensorArrayRef out); -// Parsed from ATen/ops/unflatten.h +// Parsed from ATen/ops/values_copy.h // #pragma once @@ -68449,21 +54297,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes); -@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); +// aten::values_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor values_copy(@Const @ByRef Tensor self); -// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @ByVal DimnameArrayRef names); -@Namespace("at") public static native @ByVal Tensor unflatten(@Const @ByRef Tensor self, @ByVal Dimname dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @ByVal DimnameArrayRef names); +// aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor values_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor values_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/unflatten_dense_tensors.h +// Parsed from ATen/ops/vander.h // #pragma once @@ -68484,16 +54332,17 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unflatten_dense_tensors(@Const @ByRef Tensor flat, @ByVal TensorArrayRef tensors); +// aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor vander(@Const @ByRef Tensor x, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional N, @Cast("bool") boolean increasing/*=false*/); +@Namespace("at") public static native @ByVal Tensor vander(@Const @ByRef Tensor x); -// Parsed from ATen/ops/unfold.h +// Parsed from ATen/ops/var.h // #pragma once @@ -68514,14 +54363,64 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include + + +// aten::var(Tensor self, bool unbiased=True) -> Tensor +@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @Cast("bool") boolean unbiased); + +// aten::var.dim(Tensor self, int[1]? 
dim, bool unbiased=True, bool keepdim=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/);
+@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased);
+@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/);
+@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased);
+
+// aten::var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/);
+@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self);
+@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/);
+// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/);
+@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased);
+@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/);
+@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased);
+// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out);
+@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out);
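
The legacy unbiased overloads and the newer correction overloads above compute the same statistic when correction is 1. A minimal sketch; the var overloads are taken from the bindings above, while the varargs randn factory and the LongOptional(long) convenience constructor are assumptions, and VarSketch is a hypothetical class name:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class VarSketch {
        public static void main(String[] args) {
            Tensor x = randn(3, 4);
            // legacy overload: unbiased (Bessel-corrected) variance over dim 0
            Tensor v0 = var(x, new long[]{0}, true);
            // correction overload: correction=1 is the same estimator, keepdim=false
            Tensor v1 = var(x, new long[]{0}, new LongOptional(1), false);
            System.out.println(v0.equal(v1)); // true
        }
    }

+// aten::var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)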
+@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self); +@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); +// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); +// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// Parsed from ATen/ops/unfold_backward.h +// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
+@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); + + + + +// Parsed from ATen/ops/var_mean.h // #pragma once @@ -68542,40 +54441,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor -@Namespace("at") public static native @ByVal Tensor unfold_backward(@Const @ByRef Tensor grad_in, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); -@Namespace("at") public static native @ByVal Tensor unfold_backward(@Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); - - -// aten::unfold_backward(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step) -> Tensor -@Namespace("at") public static native @ByVal Tensor unfold_backward_symint(@Const @ByRef Tensor grad_in, @ByVal SymIntRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); - - -// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor unfold_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_in, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); -@Namespace("at") public static native @ByRef Tensor unfold_backward_out(@ByRef Tensor out, @Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); +// #include -// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor unfold_backward_outf(@Const @ByRef Tensor grad_in, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor unfold_backward_outf(@Const @ByRef Tensor grad_in, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out); +// aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @Cast("bool") boolean unbiased); +// aten::var_mean.dim(Tensor self, int[1]? 
dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); -// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor unfold_backward_symint_out(@ByRef Tensor out, @Const @ByRef Tensor grad_in, @ByVal SymIntRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step); +// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self); +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); +@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); -// aten::unfold_backward.out(Tensor grad_in, SymInt[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor unfold_backward_symint_outf(@Const @ByRef Tensor grad_in, @ByVal SymIntRef input_sizes, @Cast("int64_t") long dim, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out); +// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? 
correction=None, bool keepdim=False) -> (Tensor, Tensor)
+@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/);
+@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim);
+
+// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/);
+@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self);
+@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional<int64_t>(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/);
+// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1);
+@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef<int64_t>", "std::vector<int64_t>&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1);


-// Parsed from ATen/ops/unfold_copy.h
+// Parsed from ATen/ops/vdot.h

// #pragma once

@@ -68596,21 +54498,21 @@ scalar_t sf(scalar_t x, scalar_t y)

-// #include
+// #include


-// aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
-@Namespace("at") public static native @ByVal Tensor unfold_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dimension, @Cast("int64_t") long size, @Cast("int64_t") long step);
+// aten::vdot(Tensor self, Tensor other) -> Tensor
+@Namespace("at") public static native @ByVal Tensor vdot(@Const @ByRef Tensor self, @Const @ByRef Tensor other);

-// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor unfold_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dimension, @Cast("int64_t") long size, @Cast("int64_t") long step);
-// aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor unfold_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dimension, @Cast("int64_t") long size, @Cast("int64_t") long step, @ByRef Tensor out);
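
var_mean returns std::tuple<Tensor,Tensor>, surfaced in these bindings as T_TensorTensor_T. A short sketch of unpacking it, alongside a vdot call from this section; the static imports, the varargs randn factory, and the tuple's get0()/get1() accessors are assumptions of this sketch, and VarMeanVdotSketch is a hypothetical class name:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class VarMeanVdotSketch {
        public static void main(String[] args) {
            Tensor x = randn(3, 4);
            // unbiased variance and mean in one pass; tuple unpacked via get0()/get1()
            T_TensorTensor_T vm = var_mean(x, true);
            Tensor variance = vm.get0();
            Tensor mean = vm.get1();
            // vdot: dot product of two 1-D tensors (conjugates the first for complex input)
            Tensor d = vdot(randn(5), randn(5));
        }
    }

+// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)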
+@Namespace("at") public static native @ByRef Tensor vdot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor vdot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// Parsed from ATen/ops/uniform.h +// Parsed from ATen/ops/view.h // #pragma once @@ -68631,23 +54533,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor uniform_out(@ByRef Tensor out, @Const @ByRef Tensor self, double from/*=0*/, double to/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByRef Tensor uniform_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor uniform_outf(@Const @ByRef Tensor self, double from, double to, @ByVal GeneratorOptional generator, @ByRef Tensor out); -// aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor uniform(@Const @ByRef Tensor self, double from/*=0*/, double to/*=1*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator); -@Namespace("at") public static native @ByVal Tensor uniform(@Const @ByRef Tensor self); -// Parsed from ATen/ops/unique_consecutive.h +// Parsed from ATen/ops/view_as.h // #pragma once @@ -68668,23 +54561,14 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple unique_consecutive(@Const @ByRef Tensor self, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple unique_consecutive(@Const @ByRef Tensor self); -// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer unique_consecutive_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional dim); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer unique_consecutive_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self); -// aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer unique_consecutive_outf(@Const @ByRef Tensor self, @Cast("bool") boolean return_inverse, @Cast("bool") boolean return_counts, @ByVal LongOptional dim, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); -// Parsed from ATen/ops/unique_dim.h +// Parsed from ATen/ops/view_as_complex.h // #pragma once @@ -68705,23 +54589,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - +// #include -// aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple unique_dim(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean sorted/*=true*/, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple unique_dim(@Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer unique_dim_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean sorted/*=true*/, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer unique_dim_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer unique_dim_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean sorted, @Cast("bool") boolean return_inverse, @Cast("bool") boolean return_counts, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::view_as_complex(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor view_as_complex(@Const @ByRef Tensor self); -// Parsed from ATen/ops/unique_dim_consecutive.h +// Parsed from ATen/ops/view_as_complex_copy.h // #pragma once @@ -68742,23 +54619,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTensorTuple unique_dim_consecutive(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTensorTuple unique_dim_consecutive(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::view_as_complex_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor view_as_complex_copy(@Const @ByRef Tensor self); -// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) 
out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer unique_dim_consecutive_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean return_inverse/*=false*/, @Cast("bool") boolean return_counts/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer unique_dim_consecutive_out(@ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer unique_dim_consecutive_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @Cast("bool") boolean return_inverse, @Cast("bool") boolean return_counts, @ByRef Tensor out0, @ByRef Tensor out1, @ByRef Tensor out2); +// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor view_as_complex_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor view_as_complex_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/unsafe_chunk.h +// Parsed from ATen/ops/view_as_real.h // #pragma once @@ -68779,17 +54654,16 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_chunk(@Const @ByRef Tensor self, @Cast("int64_t") long chunks); +// aten::view_as_real(Tensor(a) self) -> Tensor(a) +@Namespace("at") public static native @ByVal Tensor view_as_real(@Const @ByRef Tensor self); -// Parsed from ATen/ops/unsafe_split.h +// Parsed from ATen/ops/view_as_real_copy.h // #pragma once @@ -68810,41 +54684,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split(@Const @ByRef Tensor self, @Cast("int64_t") long split_size); - - -// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_symint(@Const @ByRef Tensor self, @ByVal SymInt split_size); - - -// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_out(@ByVal TensorArrayRef out, @Const 
@ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unsafe_split_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @Cast("int64_t") long split_size); - - -// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_outf(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); - - -// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unsafe_split_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymInt split_size); +// #include -// aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_symint_outf(@Const @ByRef Tensor self, @ByVal SymInt split_size, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); +// aten::view_as_real_copy(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal Tensor view_as_real_copy(@Const @ByRef Tensor self); +// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor view_as_real_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor view_as_real_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// Parsed from ATen/ops/unsafe_split_with_sizes.h +// Parsed from ATen/ops/view_copy.h // #pragma once @@ -68865,46 +54719,48 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes); +// aten::view_copy(Tensor self, SymInt[] size) -> Tensor +@Namespace("at") public static native @ByVal Tensor view_copy(@Const @ByRef Tensor self, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor view_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector unsafe_split_with_sizes_symint(@Const @ByRef Tensor self, @ByVal SymIntRef split_sizes); +// aten::view_copy(Tensor self, SymInt[] size) -> Tensor +@Namespace("at") public static native @ByVal Tensor view_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size); -// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes); -@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unsafe_split_with_sizes_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... split_sizes); +// aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor +@Namespace("at") public static native @ByVal Tensor view_copy(@Const @ByRef Tensor self, ScalarType dtype); +// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_with_sizes_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); -@Namespace("at") public static native void unsafe_split_with_sizes_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); +// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
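For orientation amid these generated signatures, here is a minimal sketch of calling the view_copy bindings above from Java. It assumes the presets' usual static entry point, org.bytedeco.pytorch.global.torch; the class name and shapes are illustrative only.

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ViewCopyDemo {                    // hypothetical demo class
    public static void main(String[] args) {
        Tensor t = zeros(2, 3);                // 2x3 tensor of zeros
        Tensor r = view_copy(t, 3, 2);         // aten::view_copy: reshape that returns a copy, not a view
    }
}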
+@Namespace("at") public static native @ByRef Tensor view_copy_outf(@Const @ByRef Tensor self, @ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor view_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_with_sizes_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntRef split_sizes, @Cast("int64_t") long dim/*=0*/); -@Namespace("at") public static native void unsafe_split_with_sizes_symint_out(@ByVal TensorArrayRef out, @Const @ByRef Tensor self, @ByVal SymIntRef split_sizes); + +// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor view_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntArrayRef size); -// aten::unsafe_split_with_sizes.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> () -@Namespace("at") public static native void unsafe_split_with_sizes_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef split_sizes, @Cast("int64_t") long dim, @ByVal TensorArrayRef out); +// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor view_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntArrayRef size, @ByRef Tensor out); +// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, ScalarType dtype); +// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor view_copy_outf(@Const @ByRef Tensor self, ScalarType dtype, @ByRef Tensor out); -// Parsed from ATen/ops/unsqueeze.h + +// Parsed from ATen/ops/vsplit.h // #pragma once @@ -68925,16 +54781,20 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor unsqueeze(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); + +// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @ByVal LongArrayRef indices); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
indices); -// Parsed from ATen/ops/unsqueeze_copy.h +// Parsed from ATen/ops/vstack.h // #pragma once @@ -68955,21 +54815,21 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::unsqueeze_copy(Tensor self, int dim) -> Tensor -@Namespace("at") public static native @ByVal Tensor unsqueeze_copy(@Const @ByRef Tensor self, @Cast("int64_t") long dim); +// aten::vstack(Tensor[] tensors) -> Tensor +@Namespace("at") public static native @ByVal Tensor vstack(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors); -// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor unsqueeze_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("int64_t") long dim); -// aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor unsqueeze_copy_outf(@Const @ByRef Tensor self, @Cast("int64_t") long dim, @ByRef Tensor out); +// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor vstack_out(@ByRef Tensor out, @ByVal @Cast("at::TensorList*") TensorArrayRef tensors); +// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor vstack_outf(@ByVal @Cast("at::TensorList*") TensorArrayRef tensors, @ByRef Tensor out); -// Parsed from ATen/ops/upsample_bicubic2d.h +// Parsed from ATen/ops/where.h // #pragma once @@ -68990,55 +54850,33 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
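Likewise, a hedged sketch of the vsplit binding above. TensorVector is the generated adapter for std::vector&lt;Tensor&gt;; its get accessor is assumed from the usual JavaCPP adapter API.

import org.bytedeco.pytorch.Tensor;
import org.bytedeco.pytorch.TensorVector;
import static org.bytedeco.pytorch.global.torch.*;

public class VsplitDemo {                      // hypothetical demo class
    public static void main(String[] args) {
        Tensor t = zeros(4, 2);                // 4x2 tensor
        TensorVector parts = vsplit(t, 2);     // aten::vsplit.int: two 2x2 pieces along dim 0
        Tensor first = parts.get(0);           // assumed vector-adapter accessor
    }
}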
-@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); +// #include -// aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor where(@Const @ByRef Tensor condition, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
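Before the out variants, a sketch of the functional where declared above. Tensor.eq producing the boolean mask is an assumption about the wider Tensor bindings, not a declaration from this header.

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class WhereDemo {                       // hypothetical demo class
    public static void main(String[] args) {
        Tensor x = zeros(2, 2);
        Tensor y = zeros_like(x);
        Tensor cond = x.eq(y);                 // boolean mask; assumed Tensor method
        Tensor z = where(cond, x, y);          // aten::where.self: elementwise cond ? x : y
    }
}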
+@Namespace("at") public static native @ByRef Tensor where_out(@ByRef Tensor out, @Const @ByRef Tensor condition, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor where_outf(@Const @ByRef Tensor condition, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +// aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor where(@Const @ByRef Tensor condition, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor where(@Const @ByRef Tensor condition, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); +// aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor where(@Const @ByRef Tensor condition, @Const @ByRef Scalar self, @Const @ByRef Scalar other); +// aten::where(Tensor condition) -> Tensor[] +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector where(@Const @ByRef Tensor condition); -// Parsed from ATen/ops/upsample_bicubic2d_backward.h +// Parsed from ATen/ops/xlogy.h // #pragma once @@ -69059,46 +54897,43 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? 
scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +// #include -// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); +// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor xlogy(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor xlogy(@Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bicubic2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor xlogy(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor xlogy_(@ByRef Tensor self, @Const @ByRef Tensor other); -// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor xlogy_(@ByRef Tensor self, @Const @ByRef Scalar other); +// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor xlogy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor xlogy_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bicubic2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); +// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor xlogy_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); +// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor xlogy_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); +// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
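A sketch of the xlogy family above; the Scalar(double) constructor is assumed from the presets' Scalar class.

import org.bytedeco.pytorch.Scalar;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class XlogyDemo {                       // hypothetical demo class
    public static void main(String[] args) {
        Tensor x = zeros(3);
        // xlogy(x, y) is x * log(y) with the convention 0 * log(0) == 0,
        // so a zero input stays zero even where y == 0.
        Tensor r = xlogy(x, new Scalar(2.0));  // aten::xlogy.Scalar_Other
        xlogy_(x, new Scalar(2.0));            // in-place aten::xlogy_.Scalar_Other
    }
}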
+@Namespace("at") public static native @ByRef Tensor xlogy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor xlogy_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); -// Parsed from ATen/ops/upsample_bilinear2d.h +// Parsed from ATen/ops/xor.h // #pragma once @@ -69119,55 +54954,19 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +// #include -// aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); +// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor +@Namespace("at") public static native @ByVal Tensor __xor__(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor +@Namespace("at") public static native @ByVal Tensor __xor__(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Parsed from ATen/ops/upsample_bilinear2d_backward.h +// Parsed from ATen/ops/zero.h // #pragma once @@ -69188,46 +54987,24 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_bilinear2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - +// #include -// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +// aten::zero_(Tensor(a!) self) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor zero_(@ByRef Tensor self); -// aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_bilinear2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); +// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor zero_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor zero_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// aten::zero(Tensor self) -> Tensor +@Namespace("at") public static native @ByVal @Name("zero") Tensor _zero(@Const @ByRef Tensor self); -// Parsed from ATen/ops/upsample_linear1d.h +// Parsed from ATen/ops/zeros.h // #pragma once @@ -69248,55 +55025,68 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include +// #include -// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); +// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor zeros_symint(@ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("at") public static native @ByVal Tensor zeros_symint(@ByVal SymIntArrayRef size); -// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor out); + +// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor zeros_symint(@ByVal SymIntArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_linear1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); +// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal LongArrayRef size); +@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor out); +// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal LongArrayRef size, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); -// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); +// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor zeros_symint_out(@ByRef Tensor out, @ByVal SymIntArrayRef size); -// aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); +// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
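A sketch contrasting the allocating zeros with the buffer-reusing zeros_out declared here; shapes are illustrative.

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ZerosDemo {                       // hypothetical demo class
    public static void main(String[] args) {
        Tensor fresh = zeros(2, 3);            // allocating form
        Tensor buf = zeros_like(fresh);        // buffer of the same shape and dtype
        zeros_out(buf, 2, 3);                  // aten::zeros.out: fills buf and returns it by reference
    }
}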
+@Namespace("at") public static native @ByRef Tensor zeros_symint_outf(@ByVal SymIntArrayRef size, @ByRef Tensor out); + +// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); +@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); -// Parsed from ATen/ops/upsample_linear1d_backward.h +// Parsed from ATen/ops/zeros_like.h // #pragma once @@ -69317,50 +55107,83 @@ scalar_t sf(scalar_t x, scalar_t y) -// #include - - -// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); - - -// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); - +// #include -// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_linear1d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); +// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor zeros_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByVal Tensor zeros_like(@Const @ByRef Tensor self); +// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@Namespace("at") public static native @ByVal Tensor zeros_like(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); -// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? 
scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); +// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor zeros_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("at") public static native @ByRef Tensor zeros_like_out(@ByRef Tensor out, @Const @ByRef Tensor self); +// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +@Namespace("at") public static native @ByRef Tensor zeros_like_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); -// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_linear1d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); +// Parsed from ATen/Functions.h +// #pragma once +// @generated by torchgen/gen.py from Functions.h -// Parsed from ATen/ops/upsample_nearest1d.h +// #ifdef TORCH_ASSERT_NO_OPERATORS +// #error This change adds a dependency on native_functions.yaml, +// meaning the file will need to be re-compiled every time an operator +// is changed or added. Consider if your change would be better placed in +// another file, or if a more specific header might achieve the same goal. +// See NOTE: [Tensor vs. TensorBase] +// #endif -// #pragma once +// #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +// #error This change adds a dependency on all pytorch operators, meaning the +// file will need to be re-compiled every time an operator is changed or added. 
+// Consider including a specific operator from <ATen/ops/{my_operator}.h> and
+// see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+// #endif

-// @generated by torchgen/gen.py from Function.h

+// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
+//
+// In ATen, certain generated header files include the definitions of
+// every single operator in PyTorch. Unfortunately this means every
+// time an operator signature is updated or changed in
+// native_functions.yaml, you (and every other PyTorch developer) need
+// to recompile every source file that includes any of these headers.
+//
+// To break up these header dependencies and improve incremental
+// build times for all PyTorch developers, these headers are split
+// into per-operator headers in the `ATen/ops` folder. This limits
+// incremental builds to only changes to methods of `Tensor`, or files
+// that use the specific operator being changed. With `at::sum` as an
+// example, you should include
+//
+//   <ATen/ops/sum.h>  // instead of ATen/Functions.h
+//   <ATen/ops/sum_native.h>  // instead of ATen/NativeFunctions.h
+//   <ATen/ops/sum_ops.h>  // instead of ATen/Operators.h
+//   <ATen/ops/sum_cpu_dispatch.h>  // instead of ATen/CPUFunctions.h
+//
+// However, even if you're careful to use this in your own code,
+// `Functions.h` might be included indirectly through another header
+// without you realising. To avoid this, you can add
+//
+// #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
+//
+// to the top of your source file. This way any time the non-specific
+// headers are included, the compiler will error out.
+//
+// Also, be aware that `ops` are not available in all build
+// configurations (namely fb-internal) so you must guard these
+// includes with `#ifdef AT_PER_OPERATOR_HEADERS`. e.g.
+//
+// #ifndef AT_PER_OPERATOR_HEADERS
+// #include <ATen/Functions.h>
+// #else
+// #include <ATen/ops/sum.h>
+// #endif

// #include
// #include
@@ -69368,6701 +55191,9648 @@ scalar_t sf(scalar_t x, scalar_t y)
// #include
// #include
// #include
+// #include
// #include
// #include
// #include
// #include
// #include
// #include
+// #include
+// #include
+// #include

-
-// #include
-
-
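A minimal usage sketch (editorial, not part of the generated file) for the zeros_like bindings added above. It assumes these statics are exposed on org.bytedeco.pytorch.global.torch as in the released presets, and that a long... factory overload of at::ones exists alongside the long... overloads visible elsewhere in this diff; numel and size are the at:: helpers bound further down in this same file.

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class ZerosLikeSketch {
        public static void main(String[] args) {
            Tensor src = ones(2, 3);         // assumed long... overload of at::ones
            Tensor z = zeros_like(src);      // no-options overload declared above
            System.out.println(numel(z));    // 6
            System.out.println(size(z, 0));  // 2
        }
    }

The no-argument form inherits dtype, layout, and device from the source tensor; the TensorOptions and unpacked-optionals overloads above let callers override any of those.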
-// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
-@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors);
-@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors);
+// #include
+// [a long run of per-operator "+// #include" lines for the ATen/ops headers follows here; the <...> header names were lost in extraction]
-
-// aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
-@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors);
+
+// Special C++ only overloads for std()-like functions (See gh-40287)
+// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
+// So, for example std(0) would select the std(unbiased=False) overload
+@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, int dim);
+@Namespace("at") public static native @ByVal T_TensorTensor_T var_mean(@Const @ByRef Tensor self, int dim);
+@Namespace("at") public static native @ByVal Tensor std(@Const @ByRef Tensor self, int dim);
+@Namespace("at") public static native @ByVal T_TensorTensor_T std_mean(@Const @ByRef Tensor self, int dim);
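A short sketch (editorial, not part of the generated file) of these C++-only convenience overloads as seen from Java: passing a plain int selects the dim overload directly. A long... overload of at::rand is assumed to exist like the other factory overloads in this diff, and get0()/get1() are assumed to be the accessors JavaCPP generates for the T_TensorTensor_T tuple wrapper.

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.*;

    public class StdMeanSketch {
        public static void main(String[] args) {
            Tensor t = rand(4, 5);                 // assumed long... overload of at::rand
            Tensor perColumn = std(t, 0);          // resolves to std(Tensor, int dim)
            T_TensorTensor_T sm = std_mean(t, 0);  // std and mean over dim 0 in one call
            Tensor s = sm.get0();                  // assumed tuple accessors
            Tensor m = sm.get1();
            System.out.println(numel(s) + " " + numel(m));
        }
    }

-// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)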
-@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @Cast("int64_t") long numel(@Const @ByRef Tensor tensor); +@Namespace("at") public static native @Cast("int64_t") long size(@Const @ByRef Tensor tensor, @Cast("int64_t") long dim); -// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal DoubleOptional scales, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales, @ByRef Tensor out); +@Namespace("at") public static native @Cast("int64_t") long stride(@Const @ByRef Tensor tensor, @Cast("int64_t") long dim); -// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size); +@Namespace("at") public static native @Cast("bool") boolean is_floating_point(@Const @ByRef Tensor tensor); -// aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal DoubleOptional scales, @ByRef Tensor out); +@Namespace("at") public static native @Cast("bool") boolean is_signed(@Const @ByRef Tensor tensor); +@Namespace("at") public static native @Cast("bool") boolean is_inference(@Const @ByRef Tensor tensor); -// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? 
scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native @Cast("bool") boolean _is_zerotensor(@Const @ByRef Tensor tensor); +@Namespace("at") public static native @Cast("bool") boolean is_conj(@Const @ByRef Tensor tensor); -// aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size); +@Namespace("at") public static native @ByVal Tensor conj(@Const @ByRef Tensor tensor); +@Namespace("at") public static native @Cast("bool") boolean is_neg(@Const @ByRef Tensor tensor); -// Parsed from ATen/ops/upsample_nearest1d_backward.h +// Parsed from ATen/ExpandUtils.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - +// #ifndef AT_PER_OPERATOR_HEADERS +// #include +// #else +// #include +// #include +// #endif +// #include +// #include +// #include +// #include +// #include -// #include +// #include +// #include +// #include +// #include +@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_size(@ByVal LongArrayRef a, @ByVal LongArrayRef b); +@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_size(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] a, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... b); +@Namespace("at") public static native @ByVal DimVector infer_size_dimvector(@ByVal LongArrayRef a, @ByVal LongArrayRef b); +@Namespace("at") public static native @ByVal DimVector infer_size_dimvector(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] a, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... b); +@Namespace("at") public static native @ByVal SymDimVector infer_size_symdimvector(@ByVal SymIntArrayRef a, @ByVal SymIntArrayRef b); +// Targeting ../DimVectorInferExpandGeometryResult.java -// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
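An editorial sketch of the broadcast-shape helper bound just above: infer_size applies the usual broadcasting rules (trailing dimensions must match or be 1) to two shapes. It assumes the standard JavaCPP std::vector mapping for LongVector (size()/get(long)) and the org.bytedeco.pytorch.global.torch entry point.

    import org.bytedeco.pytorch.LongVector;
    import static org.bytedeco.pytorch.global.torch.*;

    public class InferSizeSketch {
        public static void main(String[] args) {
            // [3, 1] broadcast with [4] -> [3, 4]
            LongVector shape = infer_size(new long[]{3, 1}, 4);
            for (long i = 0; i < shape.size(); i++) {
                System.out.println(shape.get(i));  // 3, then 4
            }
        }
    }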
-@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); -// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); +@Namespace("at") public static native @ByVal @Cast("std::tuple,std::vector >*") LongVector inferExpandGeometry( + @ByVal LongArrayRef tensor_sizes, + @ByVal LongArrayRef tensor_strides, + @ByVal LongArrayRef sizes); +@Namespace("at") public static native @ByVal @Cast("std::tuple,std::vector >*") LongVector inferExpandGeometry( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); +@Namespace("at") public static native @ByVal DimVectorInferExpandGeometryResult inferExpandGeometry_dimvector( + @ByVal LongArrayRef tensor_sizes, + @ByVal LongArrayRef tensor_strides, + @ByVal LongArrayRef sizes); +@Namespace("at") public static native @ByVal DimVectorInferExpandGeometryResult inferExpandGeometry_dimvector( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_strides, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); -// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); +@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_dense_strides( + @ByVal LongArrayRef tensor_sizes, + @ByVal LongArrayRef tensor_strides); +@Namespace("at") public static native @ByVal @Cast("std::vector*") LongVector infer_dense_strides( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] tensor_sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... tensor_strides); +// True if input shapes are expandable +// NOTE: infer_size did a similar check, please keep them sync if change is +// needed +@Namespace("at") public static native @Cast("bool") boolean are_expandable(@ByVal LongArrayRef shape1, @ByVal LongArrayRef shape2); +@Namespace("at") public static native @Cast("bool") boolean are_expandable(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape1, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... shape2); -// aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest1d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal DoubleOptional scales, @ByRef Tensor grad_input); +// avoid copy-construction of Tensor by using a reference_wrapper. +// NOTE [ ExpandUtils Borrowing ] +// +// Functions in ExpandUtils return `c10::MaybeOwned` because +// expansion may not actually be needed, in which case we can improve +// efficiency by returning +// `c10::MaybeOwned::borrowed(to_expand)`. However, this means +// that you need to be careful: the returned `c10::MaybeOwned` +// must not outlive the original `Tensor` object that `to_expand` +// referred to! The deleted rvalue reference overloads of these +// functions help with this by preventing trivial use of a temporary +// resulting from a function call, but it is still possible to make a +// mistake. -// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? 
scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); +@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_inplace( + @Const @ByRef Tensor tensor, + @Const @ByRef Tensor to_expand); -// aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales); -@Namespace("at") public static native @ByVal Tensor upsample_nearest1d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); +@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_inplace( + @Const @ByRef Tensor tensor, + @Const @ByRef Tensor to_expand, + @Cast("const char*") BytePointer api_name); +@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_inplace( + @Const @ByRef Tensor tensor, + @Const @ByRef Tensor to_expand, + String api_name); +@Namespace("at") public static native @ByVal T_TensorMaybeOwnedTensorMaybeOwned_T expand_inplace( + @Const @ByRef Tensor tensor, + @Const @ByRef Tensor to_expand1, + @Const @ByRef Tensor to_expand2); -// Parsed from ATen/ops/upsample_nearest2d.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("at") public static native @ByVal T_TensorMaybeOwnedTensorMaybeOwned_T expand_inplace( + @Const @ByRef Tensor tensor, + @Const @ByRef Tensor to_expand1, + @Const @ByRef Tensor to_expand2, + @Cast("const char*") BytePointer api_name); +@Namespace("at") public static native @ByVal T_TensorMaybeOwnedTensorMaybeOwned_T expand_inplace( + @Const @ByRef Tensor tensor, + @Const @ByRef Tensor to_expand1, + @Const @ByRef Tensor to_expand2, + String api_name); -// #include -// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? 
scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors); +// See NOTE [ ExpandUtils Borrowing ] above for `MaybeOwned` explanation. +@Namespace("at") public static native @ByVal T_TensorMaybeOwnedTensorMaybeOwned_T expand_outplace(@Const @ByRef Tensor to_expand1, @Const @ByRef Tensor to_expand2); -// aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); -// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native @ByVal T_TensorMaybeOwnedTensorMaybeOwned_T expand_outplace( + @Const @ByRef Tensor to_expand1, + @Const @ByRef Tensor to_expand2, + @Cast("const char*") BytePointer api_name); +@Namespace("at") public static native @ByVal T_TensorMaybeOwnedTensorMaybeOwned_T expand_outplace( + @Const @ByRef Tensor to_expand1, + @Const @ByRef Tensor to_expand2, + String api_name); -// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
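An editorial sketch of the expandability check bound a few lines up, which sits alongside the expand_inplace/expand_outplace helpers here: two shapes are expandable together when each pair of trailing dimensions is equal or one of them is 1.

    import static org.bytedeco.pytorch.global.torch.*;

    public class AreExpandableSketch {
        public static void main(String[] args) {
            System.out.println(are_expandable(new long[]{3, 1}, 3, 4));  // true
            System.out.println(are_expandable(new long[]{2, 3}, 3, 2));  // false: 3 vs 2
        }
    }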
-@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size); -// aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native @ByVal T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T expand_outplace( + @Const @ByRef Tensor to_expand1, + @Const @ByRef Tensor to_expand2, + @Const @ByRef Tensor to_expand3); -// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size); -// Parsed from ATen/ops/upsample_nearest2d_backward.h -// #pragma once +@Namespace("at") public static native @ByVal T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T expand_outplace( + @Const @ByRef Tensor to_expand1, + @Const @ByRef Tensor to_expand2, + @Const @ByRef Tensor to_expand3, + @Cast("const char*") BytePointer api_name); +@Namespace("at") public static native @ByVal T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T expand_outplace( + @Const @ByRef Tensor to_expand1, + @Const @ByRef Tensor to_expand2, + @Const @ByRef Tensor to_expand3, + String api_name); -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); -// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); +@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( + @Const @ByRef Tensor to_expand, + @ByVal LongArrayRef sizes); +@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( + @Const @ByRef Tensor to_expand, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); -// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); +@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( + @Const @ByRef Tensor to_expand, + @ByVal LongArrayRef sizes, + @Cast("const char*") BytePointer api_name); +@Namespace("at") public static native @Cast({"", "c10::MaybeOwned&&"}) @StdMove TensorMaybeOwned expand_size( + @Const @ByRef Tensor to_expand, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + String api_name); -// aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest2d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); +@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector expand_outplace(@ByVal @Cast("at::TensorList*") TensorArrayRef to_expand); +@Namespace("at") public static native @ByVal Tensor sum_to( + @ByVal Tensor tensor, + @Const @ByVal SymIntArrayRef shape, + @Cast("bool") boolean always_return_non_view/*=false*/); +@Namespace("at") public static native @ByVal Tensor sum_to( + @ByVal Tensor tensor, + @Const @ByVal SymIntArrayRef shape); -// aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest2d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); +// Sums `tensor` repeatedly to produce a tensor of shape `shape`. +// Precondition: is_expandable_to(shape, tensor.sizes()) must be true +@Namespace("at") public static native @ByVal Tensor sum_to( + @ByVal Tensor tensor, + @Const @ByVal LongArrayRef shape, + @Cast("bool") boolean always_return_non_view/*=false*/); +@Namespace("at") public static native @ByVal Tensor sum_to( + @ByVal Tensor tensor, + @Const @ByVal LongArrayRef shape); +@Namespace("at") public static native @ByVal Tensor sum_to( + @ByVal Tensor tensor, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, + @Cast("bool") boolean always_return_non_view/*=false*/); +@Namespace("at") public static native @ByVal Tensor sum_to( + @ByVal Tensor tensor, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
shape); +@Namespace("at") public static native @Cast("bool") boolean is_expandable_to( + @ByVal SymIntArrayRef shape, + @ByVal SymIntArrayRef desired); +@Namespace("at") public static native @Cast("bool") boolean is_expandable_to(@ByVal LongArrayRef shape, @ByVal LongArrayRef desired); +@Namespace("at") public static native @Cast("bool") boolean is_expandable_to(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] shape, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... desired); + // namespace at -// Parsed from ATen/ops/upsample_nearest3d.h +// Parsed from ATen/MemoryOverlap.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - +// #include -// #include +// MemOverlap: Whether or not there is memory overlap +// +// No: Absolutely no memory overlap +// Yes: Absolutely yes memory overlap +// TooHard: There might be memory overlap, but it was too expensive to compute. +// +// NB: Please update the python test for these if you renumber them. +@Namespace("at") public enum MemOverlap { No(0), Yes(1), TooHard(2); + public final int value; + private MemOverlap(int v) { this.value = v; } + private MemOverlap(MemOverlap e) { this.value = e.value; } + public MemOverlap intern() { for (MemOverlap e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} -// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public enum MemOverlapStatus { Full(0), Partial(1), No(2), TooHard(3); + public final int value; + private MemOverlapStatus(int v) { this.value = v; } + private MemOverlapStatus(MemOverlapStatus e) { this.value = e.value; } + public MemOverlapStatus intern() { for (MemOverlapStatus e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} -// aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @ByVal DoubleArrayRefOptional scale_factors); +@Namespace("at") public static native MemOverlap has_internal_overlap(@Const @ByRef TensorBase t); +@Namespace("at") public static native MemOverlap has_internal_overlap(TensorImpl t); +@Namespace("at") public static native void assert_no_internal_overlap(@Const @ByRef TensorBase t); +@Namespace("at") public static native void assert_no_internal_overlap(TensorImpl t); -// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
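An editorial sketch of the overlap checks just bound: an expanded view carries a zero stride, so has_internal_overlap reports MemOverlap.Yes for it, while a freshly allocated tensor passes assert_no_internal_overlap. It assumes a long... overload of Tensor.expand on the Java Tensor class and that Tensor is usable where TensorBase is expected.

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class OverlapSketch {
        public static void main(String[] args) {
            Tensor base = ones(1, 3);
            Tensor expanded = base.expand(4, 3);  // dim 0 has stride 0 -> self-overlapping
            System.out.println(has_internal_overlap(expanded));  // Yes
            assert_no_internal_overlap(base);     // contiguous: no error
        }
    }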
-@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); +@Namespace("at") public static native MemOverlapStatus get_overlap_status(@Const @ByRef TensorBase a, @Const @ByRef TensorBase b); +@Namespace("at") public static native MemOverlapStatus get_overlap_status(TensorImpl a, TensorImpl b); +@Namespace("at") public static native void assert_no_partial_overlap( + @Const @ByRef TensorBase a, + @Const @ByRef TensorBase b); -// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +@Namespace("at") public static native void assert_no_overlap(@Const @ByRef TensorBase a, @Const @ByRef TensorBase b); +@Namespace("at") public static native void assert_no_overlap(TensorImpl a, TensorImpl b); -// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size); + // namespace at -// aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); +// Parsed from ATen/NestedTensorImpl.h +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +@Namespace("at::native") public static native @Cast("bool") boolean nested_tensor_impl_is_contiguous(@Const NestedTensorImpl nt); +// Targeting ../NestedTensorImpl.java -// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); -// aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size); +@Namespace("at::native") public static native NestedTensorImpl get_nested_tensor_impl_or_null( + @Const @ByRef Tensor tensor); +@Namespace("at::native") public static native NestedTensorImpl get_nested_tensor_impl(@Const @ByRef Tensor tensor); +@Namespace("at::native") public static native @Const @ByRef Tensor get_nested_size_tensor(@Const @ByRef Tensor tensor); + // namespace native + // namespace at -// Parsed from ATen/ops/upsample_nearest3d_backward.h +// Parsed from torch/csrc/autograd/input_metadata.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include // #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); - - -// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); - - -// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_nearest3d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... input_size); - - -// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_nearest3d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size); +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #ifndef AT_PER_OPERATOR_HEADERS +// #include +// #else +// #include +// #endif +// #include +// #include +/** + * Records TensorOptions, shape of the tensor, whether or not the Python + * dispatch key is set (tensor subclass), and, where applicable, the stream the + * corresponding operation took place on. + * + * If is_valid() is false, then the corresponding input is not used and may be + * an undefined tensor. + */ + // namespace autograd + // namespace torch -// Parsed from ATen/ops/upsample_trilinear3d.h +// Parsed from torch/csrc/autograd/saved_variable_hooks.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include // #include -// #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor input, @ByVal LongArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor input, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_symint(@Const @ByRef Tensor input, @ByVal SymIntArrayRefOptional output_size, @Cast("bool") boolean align_corners, @ByVal DoubleArrayRefOptional scale_factors); - - -// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor out); - - -// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_symint(@Const @ByRef Tensor self, @ByVal SymIntRef output_size, @Cast("bool") boolean align_corners); +// Targeting ../SavedVariableHooks.java + // namespace autograd + // namespace torch -// Parsed from ATen/ops/upsample_trilinear3d_backward.h +// Parsed from torch/csrc/autograd/saved_variable.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// #include +// #include +// #include -// #include -// #include -// #include -// #include -// #include -// #include // #include -// #include -// #include -// #include -// #include -// #include - - - -// #include +// #include +// #include -// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_outf(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_symint_out(@ByRef Tensor grad_input, @Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor upsample_trilinear3d_backward_symint_outf(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal DoubleOptional scales_d, @ByVal DoubleOptional scales_h, @ByVal DoubleOptional scales_w, @ByRef Tensor grad_input); - - -// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef input_size, @Cast("bool") boolean align_corners); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward(@Const @ByRef Tensor grad_output, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] input_size, @Cast("bool") boolean align_corners); - - -// aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? 
scales_w=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_d, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_h, @ByVal(nullValue = "c10::optional(c10::nullopt)") DoubleOptional scales_w); -@Namespace("at") public static native @ByVal Tensor upsample_trilinear3d_backward_symint(@Const @ByRef Tensor grad_output, @ByVal SymIntRef output_size, @ByVal SymIntRef input_size, @Cast("bool") boolean align_corners); - +@Namespace("torch::autograd") public static native @Cast("const char*") BytePointer ERR_BACKWARD_TWICE(); public static native void ERR_BACKWARD_TWICE(BytePointer setter); +// Targeting ../SavedVariable.java + // namespace autograd + // namespace torch -// Parsed from ATen/ops/value_selecting_reduction_backward.h +// Parsed from ATen/core/Variadic.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - +// #include +// #include +// #include +// #include -// #include +// #include +// #include +// This class allows you to write variadic functions which +// call a (possibly overloaded) function on each argument, +// in order. This is most commonly used in autogenerated code, +// where it is convenient to have a function that can uniformly +// take arguments of different types. If your arguments +// are homogenous consider using a std::initializer_list instead. +// +// For examples of this in use, see torch/csrc/utils/variadic.h -// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor -@Namespace("at") public static native @ByVal Tensor value_selecting_reduction_backward(@Const @ByRef Tensor grad, @Cast("int64_t") long dim, @Const @ByRef Tensor indices, @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, @Cast("bool") boolean keepdim); -@Namespace("at") public static native @ByVal Tensor value_selecting_reduction_backward(@Const @ByRef Tensor grad, @Cast("int64_t") long dim, @Const @ByRef Tensor indices, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, @Cast("bool") boolean keepdim); + // namespace torch -// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor -@Namespace("at") public static native @ByVal Tensor value_selecting_reduction_backward_symint(@Const @ByRef Tensor grad, @Cast("int64_t") long dim, @Const @ByRef Tensor indices, @ByVal SymIntRef sizes, @Cast("bool") boolean keepdim); +// Parsed from torch/csrc/utils/variadic.h +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +//===----------------------------------------------------------------------===// +// std::index_sequence shim for C++11 +//===----------------------------------------------------------------------===// -// Parsed from ATen/ops/values.h +// A container of type-template parameter indices. -// #pragma once +// Decrements the index N, adds N-1 to the list of indices and forwards +// whatever we already have. -// @generated by torchgen/gen.py from Function.h +// Partial specialization that forms our base case. 
When N is zero, we stop +// and define a typedef that will be visible to earlier classes due to +// inheritance. The typedef we define is an index list containing the numbers +// 0 through N-1. -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// +// Targeting ../pack.java + // namespace detail -// #include + // namespace torch +// Parsed from ATen/SequenceNumber.h +// #pragma once +// #include +// #include +// A simple thread local enumeration, used to link forward and backward pass +// ops and is used by autograd and observers framework -// Parsed from ATen/ops/values_copy.h +@Namespace("at::sequence_number") public static native @Cast("uint64_t") long peek(); +@Namespace("at::sequence_number") public static native @Cast("uint64_t") long get_and_increment(); -// #pragma once + // namespace sequence_number + // namespace at -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Parsed from torch/csrc/autograd/function.h +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// aten::values_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor values_copy(@Const @ByRef Tensor self); +// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +// #endif -// aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor values_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor values_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// Custom deleter to prevent stack overflows. +@Namespace("torch::autograd") public static native void deleteNode(Node function); +// Guard that sets and restores the evaluating node +// Return the Node currently being evaluated (if any) +// This is only set during the backward pass while a Node is being +// executed. +@Namespace("torch::autograd") public static native @SharedPtr Node get_current_node(); +// Targeting ../Node.java -// Parsed from ATen/ops/vander.h +// Targeting ../TraceableFunction.java -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Associated Free Nodes +//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Implementation of `collect_next_edges` (see below). + // namespace detail +/** Create an {@code Edge} between the given {@code variable} and the {@code function}, which is + * assumed to be the gradient function of this variable (i.e. the function + * through which this variable is backpropagated during the backward pass). 
+ * This sets the {@code grad_fn} property of the {@code variable}. This function assumes + * that the {@code Variable} is a new input to the gradient function and its + * {@code input_nr} thus equal to {@code function->num_inputs()}. Additionally, it + * increments the {@code Node}'s number of inputs by one. Approximately + * equivalent to {@code variable.set_gradient_edge(function, + * function->add_input_metadata(variable.dispatch_type(), variable.sizes()))}. + * If you don't want the {@code Node}'s {@code num_inputs} to be incremented, use + * {@code set_gradient_edge} directly. */ +@Namespace("torch::autograd") public static native void create_gradient_edge( + @Cast("torch::autograd::Variable*") @ByRef Tensor variable, + @SharedPtr Node function); +/** Return true if any of the variables in the list require a gradient. */ +@Namespace("torch::autograd") public static native @Cast("bool") boolean any_variable_requires_grad(@Cast({"", "std::vector"}) @StdMove TensorVector variables); -// #include +/** Return the next edges of all the given variables, or tuples of variables. */ + // namespace autograd + // namespace torch -// aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor vander(@Const @ByRef Tensor x, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional N, @Cast("bool") boolean increasing/*=false*/); -@Namespace("at") public static native @ByVal Tensor vander(@Const @ByRef Tensor x); +// Parsed from torch/csrc/autograd/custom_function.h +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include -// Parsed from ATen/ops/var.h -// #pragma once -// @generated by torchgen/gen.py from Function.h +@Namespace("torch::autograd") public static native void check_variable_result( + @Const @ByRef TensorBase original, + @Const @ByRef TensorBase result, + @StdString BytePointer hook_name); +@Namespace("torch::autograd") public static native void check_variable_result( + @Const @ByRef TensorBase original, + @Const @ByRef TensorBase result, + @StdString String hook_name); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Get the return type of the forward function of the custom Function class X +/// +/// +/// +/// +/// +// Targeting ../FunctionCrossMapLRN2d.java -// #include +// Targeting ../AutogradContext.java -// aten::var(Tensor self, bool unbiased=True) -> Tensor -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @Cast("bool") boolean unbiased); +// Targeting ../VariableInfo.java -// aten::var.dim(Tensor self, int[1]? 
dim, bool unbiased=True, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); -// aten::var.correction(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); -// aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// CppNode is the Node in the autograd graph that represents the user defined +// backward function for Function. Calls to CppNode::apply are forward to +// T::backward(). -// aten::var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -// aten::var.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +@Namespace("torch::autograd") public static native @ByVal TensorOptionalVector to_optional(@Cast("torch::autograd::Variable*") @ByRef Tensor output); -// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); +@Namespace("torch::autograd") public static native @ByVal TensorOptionalVector to_optional(@ByRef TensorVector output); -// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); -// aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim, @ByRef Tensor out); -// aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal Tensor var(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByRef Tensor var_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); -// aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor var_outf(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out); +// The logic here is the same as PyNode::apply, so changes to it should be done +// in both the places -// Parsed from ATen/ops/var_mean.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace autograd + // namespace torch -// #include +// Parsed from torch/autograd.h +// #pragma once -// aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @Cast("bool") boolean unbiased); +// #include +// #include +// #include -// aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean unbiased); -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @Cast("bool") boolean unbiased); -// aten::var_mean.correction(Tensor self, int[1]? dim=None, *, int? 
correction=None, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); +// Parsed from torch/cuda.h -// aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @Cast("bool") boolean unbiased); +// #pragma once -// aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction=None, bool keepdim=False) -> (Tensor, Tensor) -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal TensorTensorTuple var_mean(@Const @ByRef Tensor self, @ByVal DimnameArrayRef dim); +// #include -// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer var_mean_out(@ByRef Tensor out0, @ByRef Tensor out1, @Const @ByRef Tensor self, @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional correction, @Cast("bool") boolean keepdim/*=false*/); -// aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, int? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer var_mean_outf(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); -@Namespace("at") public static native @ByVal @Cast("std::tuple*") PointerPointer var_mean_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, @ByVal LongOptional correction, @Cast("bool") boolean keepdim, @ByRef Tensor out0, @ByRef Tensor out1); +// #include +// #include +/** Returns the number of CUDA devices available. */ +@Namespace("torch::cuda") public static native @Cast("size_t") @Name("device_count") long cuda_device_count(); +/** Returns true if at least one CUDA device is available. */ +@Namespace("torch::cuda") public static native @Cast("bool") @Name("is_available") boolean cuda_is_available(); +/** Returns true if CUDA is available, and CuDNN is available. */ +@Namespace("torch::cuda") public static native @Cast("bool") boolean cudnn_is_available(); -// Parsed from ATen/ops/vdot.h +/** Sets the seed for the current GPU. */ +@Namespace("torch::cuda") public static native @Name("manual_seed") void cuda_manual_seed(@Cast("uint64_t") long seed); -// #pragma once +/** Sets the seed for all available GPUs. */ +@Namespace("torch::cuda") public static native @Name("manual_seed_all") void cuda_manual_seed_all(@Cast("uint64_t") long seed); -// @generated by torchgen/gen.py from Function.h +/** Waits for all kernels in all streams on a CUDA device to complete. */ +@Namespace("torch::cuda") public static native @Name("synchronize") void cuda_synchronize(@Cast("int64_t") long device_index/*=-1*/); +@Namespace("torch::cuda") public static native @Name("synchronize") void cuda_synchronize(); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace cuda + // namespace torch +// Parsed from torch/arg.h -// #include +// #pragma once +// #include -// aten::vdot(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor vdot(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// #define TORCH_ARG(T, name) +// public: +// inline auto name(const T& new_##name)->decltype(*this) { /* NOLINT */ +// this->name##_ = new_##name; +// return *this; +// } +// inline auto name(T&& new_##name)->decltype(*this) { /* NOLINT */ +// this->name##_ = std::move(new_##name); +// return *this; +// } +// inline const T& name() const noexcept { /* NOLINT */ +// return this->name##_; +// } +// inline T& name() noexcept { /* NOLINT */ +// return this->name##_; +// } +// +// private: +// T name##_ /* NOLINT */ -// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor vdot_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
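The torch/cuda.h helpers are renamed on the Java side through the @Name annotations above so they do not clash with other bindings. A small self-contained probe using only the signatures shown (the class name is illustrative):

    import static org.bytedeco.pytorch.global.torch.*;

    public class CudaProbe {
        public static void main(String[] args) {
            System.out.println("CUDA devices:    " + cuda_device_count());
            System.out.println("CUDA available:  " + cuda_is_available());
            System.out.println("cuDNN available: " + cudnn_is_available());
            if (cuda_is_available()) {
                cuda_manual_seed_all(42); // seeds every visible GPU
                cuda_synchronize();       // no-arg overload; device_index defaults to -1
            }
        }
    }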
-@Namespace("at") public static native @ByRef Tensor vdot_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// Parsed from ATen/Device.h +// #pragma once +// #include -// Parsed from ATen/ops/view.h +// Parsed from ATen/Dispatch.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #ifdef __CUDACC__ +// #include // For CUDA_VERSION +// #endif +// #ifdef TEMPLATE_SELECTIVE_BUILD +// #include +// #else +/** + * The method should_include_kernel_dtype() returns true/false + * based on whether the switching code for a specific dtype should be + * included based on build time constants generated from tracing model + * execution. This method will be implmeneted via code-generation and + * included in this file when code-gen is ready. + */ +@Namespace("at") public static native @Cast("const bool") boolean should_include_kernel_dtype( + @Cast("const char*") BytePointer arg0, + ScalarType arg1 +); +@Namespace("at") public static native @Cast("const bool") boolean should_include_kernel_dtype( + String arg0, + ScalarType arg1 +); + // namespace at +// #endif +/** + * In the Facebook internal build (using BUCK), this macro is enabled by + * passing in -c pt.enable_record_kernel_dtype=1 when building the tracer + * binary. + */ +// #if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE +// #else +// #define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type) +// #endif -// #include +// Avoid if_constexpr if possble, as it's more expensive to compile +// #if defined __cpp_if_constexpr +// #define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type) +// do { +// if constexpr (!at::should_include_kernel_dtype( +// at_dispatch_name, enum_type)) { +// AT_ERROR( +// "dtype '", +// toString(enum_type), +// "' not selected for kernel tag ", +// at_dispatch_name); +// } +// } while (0) +// #else // defined __cpp_if_constexpr +// #define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type) +// at::guts::if_constexpr([&] { +// AT_ERROR( +// "dtype '", +// toString(enum_type), +// "' not selected for kernel tag ", +// at_dispatch_name); +// }) +// #endif +// #define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) +// case enum_type: { +// AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); +// using HINT C10_UNUSED = c10::impl::ScalarTypeToCPPTypeT; +// return __VA_ARGS__(); +// } +// #define AT_DISPATCH_CASE(enum_type, ...) +// AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__) +// #define AT_DISPATCH_CASE_QINT(enum_type, scalar_type, ...) +// case enum_type: { +// AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); +// using scalar_t = scalar_type; +// using underlying_t C10_UNUSED = typename scalar_t::underlying; +// const auto& SCALAR_TYPE C10_UNUSED = enum_type; +// const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); +// return __VA_ARGS__(); +// } +// #define AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( +// enum_type, scalar_type, bitwidth, qmin, qmax, ...) 
+// case enum_type: { +// AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); +// using scalar_t = scalar_type; +// using underlying_t C10_UNUSED = typename scalar_t::underlying; +// const auto& SCALAR_TYPE C10_UNUSED = enum_type; +// const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); +// C10_UNUSED int bit_width = bitwidth; +// C10_UNUSED int64_t quant_min = qmin; +// C10_UNUSED int64_t quant_max = qmax; +// return __VA_ARGS__(); +// } +@Namespace("detail") public static native ScalarType scalar_type(ScalarType s); -// Parsed from ATen/ops/view_as.h -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace detail +// The AT_DISPATCH_* family of macros provides the ability to +// conveniently generate specializations of a kernel over all of the +// dtypes we care about in PyTorch. We call it "dispatch" because +// we are "dispatching" to the correct, dtype-specific kernel. +// +// A standard usage looks like: +// +// AT_DISPATCH_ALL_TYPES(self.scalar_type(), "op_name", [&] { +// // Your code here, with 'scalar_t' now defined to +// // be the dtype in question +// }); +// +// There are many variations of this macro, so it's important to +// understand exactly /which/ dtypes you want to get instantiated, as +// well as what the "default" set is. +// +// The default set of dtypes that are instantiated (e.g., by +// AT_DISPATCH_ALL_TYPES) are floating point types (float, double), +// and integral types (int32_t, int64_t, int16_t, int8_t, uint8_t), +// but NOT booleans (bool), half-precision floats (Half) or +// complex number (c10::complex, c10::complex). +// This "cut" is somewhat historical (the default types are the +// ones that TH historically supported), but it also reflects the +// fact that the non-default types are "poorly" behaved (booleans +// are NOT integers mod 2, half precision operations ~essentially +// don't exist on CPU, complex numbers are an experimental application). +// +// Here are the questions you should generally ask to decide which +// dispatch you want: +// +// 1. Is this an integral or floating point specific operation? +// (If so, you'll want one of the FLOATING or INTEGRAL macros.) +// +// 2. Should half be supported? (If you're on CPU, the answer is almost +// definitely no. If you do want support, use one of the AND_HALF +// macros) +// +// Much rarer situations: +// +// 3. Should bool be supported? (You often have to write your kernel +// differently if arithmetic operations are involved.) If so, +// Use AT_DISPATCH_ALL_TYPES_AND along with ScalarType::Bool +// +// 4. Should complex be supported? The answer is almost always no, +// unless you are working on "generic" code that should work on +// all dtypes. +// +// Parameters: +// ----------- +// +// 1. The NAME argument is a "tag" that is used to trace and then +// conditionally compile fragments of the case statements such +// that the kernel functions are specialized only for the dtypes +// that are needed. The NAME parameter *must* be a build time +// const char* (can't be std::string, etc...) +// +// Please ensure that the NAME is unique for every implementation +// or you run the risk of over-including code for the kernel +// functions. There is no risk of missing out on any code, so +// it's mostly a risk of a Type-2 error, and not a Type-1 error. 
+// +// Switch-like syntax: +// ------------------- +// There is also a switch-case like syntax which is useful if a kernel +// needs to be specialized for particular scalar types +// +// AT_DISPATCH_SWITCH(self.scalar_type(), "op_name", +// AT_DISPATCH_CASE_INTEGRAL_TYPES([&] { +// op_integral(iter); +// }) +// AT_DISPATCH_CASE_FLOATING_TYPES([&] { +// op_floating(iter); +// }) +// AT_DISPATCH_CASE(kBool, [&] { +// op_bool(iter); +// }) +// ); +// +// For each AT_DISPATCH_FOO macro, there is a corresponding +// AT_DISPATCH_CASE_FOO macro which can be used inside of an +// AT_DISPATCH_SWITCH block. +// NB: the the_type variable is not used, but we have kept it for +// backwards compatibility. It's probably not used by anyone though; +// but we're just being safe (and it doesn't hurt.) Note we must +// use it to shut up warnings about unused store. +// #define AT_DISPATCH_SWITCH(TYPE, NAME, ...) +// [&] { +// const auto& the_type = TYPE; +// constexpr const char* at_dispatch_name = NAME; +// /* don't use TYPE again in case it is an expensive or side-effect op */ +// at::ScalarType _st = ::detail::scalar_type(the_type); +// RECORD_KERNEL_FUNCTION_DTYPE(at_dispatch_name, _st); +// switch (_st) { +// __VA_ARGS__ +// default: +// AT_ERROR( +// '"', +// at_dispatch_name, +// "\" not implemented for '", +// toString(_st), +// "'"); +// } +// }() +// #define AT_DISPATCH_CASE_FLOATING_TYPES(...) +// AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) +// #define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)) -// Parsed from ATen/ops/view_as_complex.h +// #define AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(...) +// AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) -// #pragma once +// #define AT_DISPATCH_FLOATING_TYPES_AND_HALF(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(__VA_ARGS__)) -// @generated by torchgen/gen.py from Function.h +// #define AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, ...) +// AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #define AT_DISPATCH_FLOATING_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_FLOATING_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) +// AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// #define AT_DISPATCH_FLOATING_TYPES_AND2( +// SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_FLOATING_TYPES_AND2( +// SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) -// #include +// #define AT_DISPATCH_CASE_COMPLEX_TYPES(...) +// AT_DISPATCH_CASE(at::ScalarType::ComplexDouble, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::ComplexFloat, __VA_ARGS__) +// #define AT_DISPATCH_COMPLEX_TYPES(TYPE, NAME, ...) 
+// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)) -// aten::view_as_complex(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor view_as_complex(@Const @ByRef Tensor self); +// #define AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, ...) +// AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) +// #define AT_DISPATCH_COMPLEX_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(...) +// AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) +// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, NAME, AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__)) -// Parsed from ATen/ops/view_as_complex_copy.h +// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1(SCALARTYPE, ...) +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) -// #pragma once +// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1( +// SCALARTYPE, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1( +// SCALARTYPE, __VA_ARGS__)) -// @generated by torchgen/gen.py from Function.h +// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( +// SCALARTYPE1, SCALARTYPE2, ...) +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( +// SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( +// SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// #define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) -// #include +// #define AT_DISPATCH_CASE_INTEGRAL_TYPES(...) +// AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) +// AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) +// #define AT_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__)) -// aten::view_as_complex_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor view_as_complex_copy(@Const @ByRef Tensor self); +// #define AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, ...) +// AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) -// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor view_as_complex_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor view_as_complex_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// #define AT_DISPATCH_INTEGRAL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_ALL_TYPES(...) +// AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) +// #define AT_DISPATCH_ALL_TYPES(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__)) +// #define AT_DISPATCH_CASE_QINT_TYPES(...) +// AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) +// AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) +// AT_DISPATCH_CASE_QINT(at::kQInt32, at::qint32, __VA_ARGS__) -// Parsed from ATen/ops/view_as_real.h +// #define AT_DISPATCH_QINT_TYPES(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__)) -// #pragma once +// #define AT_DISPATCH_CASE_QINT_BYTE_TYPES(...) +// AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) +// AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) -// @generated by torchgen/gen.py from Function.h +// #define AT_DISPATCH_QINT_BYTE_TYPES(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_BYTE_TYPES(__VA_ARGS__)) -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #define AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(...) +// AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( +// at::kQInt8, at::qint8, CHAR_BIT, SCHAR_MIN, SCHAR_MAX, __VA_ARGS__) +// AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( +// at::kQUInt8, at::quint8, CHAR_BIT, 0, UCHAR_MAX, __VA_ARGS__) +// AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( +// at::kQInt32, +// at::qint32, +// CHAR_BIT * sizeof(int), +// INT_MIN, +// INT_MAX, +// __VA_ARGS__) +// AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( +// at::kQUInt4x2, at::quint4x2, 4, 0, 15, __VA_ARGS__) +// AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( +// at::kQUInt2x4, at::quint2x4, 2, 0, 3, __VA_ARGS__) +// #define AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, NAME, AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(__VA_ARGS__)) +// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(...) +// AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) -// #include +// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__)) +// #define AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, ...) +// AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) -// aten::view_as_real(Tensor(a) self) -> Tensor(a) -@Namespace("at") public static native @ByVal Tensor view_as_real(@Const @ByRef Tensor self); +// #define AT_DISPATCH_ALL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, ...) +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) +// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, TYPE, NAME, ...) 
+// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) +// AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) -// Parsed from ATen/ops/view_as_real_copy.h +// #define AT_DISPATCH_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) -// #pragma once +// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( +// SCALARTYPE1, SCALARTYPE2, ...) +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) -// @generated by torchgen/gen.py from Function.h +// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( +// SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( +// SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #define AT_DISPATCH_CASE_ALL_TYPES_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) +// AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// #define AT_DISPATCH_ALL_TYPES_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) -// #include +// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) +// #define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) +// AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) -// aten::view_as_real_copy(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal Tensor view_as_real_copy(@Const @ByRef Tensor self); +// #define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( +// SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__)) -// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor view_as_real_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor view_as_real_copy_outf(@Const @ByRef Tensor self, @ByRef Tensor out); +// #define AT_DISPATCH_INDEX_TYPES(TYPE, NAME, ...) +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_PRIVATE_CASE_TYPE_USING_HINT( +// at::ScalarType::Int, index_t, __VA_ARGS__) +// AT_PRIVATE_CASE_TYPE_USING_HINT( +// at::ScalarType::Long, index_t, __VA_ARGS__)) +// ---------------------------------------------------------------------------- +// DEPRECATED MACROS, DON'T USE THESE +// ---------------------------------------------------------------------------- +// #define AT_DISPATCH_ALL_TYPES_AND_HALF(TYPE, NAME, ...) +// detail::deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF(); +// AT_DISPATCH_SWITCH( +// TYPE, +// NAME, +// AT_DISPATCH_CASE_ALL_TYPES_AND(at::ScalarType::Half, __VA_ARGS__)) -// Parsed from ATen/ops/view_copy.h +// Parsed from ATen/ScalarOps.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include // #include -// #include -// #include -// #include -// #include - - - -// #include - - -// aten::view_copy(Tensor self, SymInt[] size) -> Tensor -@Namespace("at") public static native @ByVal Tensor view_copy(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor view_copy(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); - - -// aten::view_copy(Tensor self, SymInt[] size) -> Tensor -@Namespace("at") public static native @ByVal Tensor view_copy_symint(@Const @ByRef Tensor self, @ByVal SymIntRef size); - -// aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor -@Namespace("at") public static native @ByVal Tensor view_copy(@Const @ByRef Tensor self, ScalarType dtype); +// #ifndef AT_PER_OPERATOR_HEADERS +// #include +// #else +// #include +// #endif +// When filling a number to 1-element CPU tensor, we want to skip +// everything but manipulate data ptr directly. +// Ideally this fast pass should be implemented in TensorIterator, +// but we also want to skip compute_types which in not avoidable +// in TensorIterator for now. -// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByRef Tensor view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("at::detail") public static native @ByVal Tensor scalar_tensor_static( + @Const @ByRef Scalar s, + @ByVal ScalarTypeOptional dtype_opt, + @ByVal DeviceOptional device_opt); + // namespace detail + // namespace at +// This is in the c10 namespace because we use ADL to find the functions in it. -// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor view_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor view_copy_outf(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out); +// FIXME: this should be (and was) Scalar::toTensor, but there is currently no +// way to implement this without going through Derived Types (which are not part +// of core). +@Namespace("c10") public static native @ByVal Tensor scalar_to_tensor( + @Const @ByRef Scalar s, + @Const @ByVal(nullValue = "c10::Device(at::kCPU)") Device device); +@Namespace("c10") public static native @ByVal Tensor scalar_to_tensor( + @Const @ByRef Scalar s); + // namespace c10 -// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor view_copy_symint_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal SymIntRef size); +@Namespace("at::native") public static native @ByVal Tensor wrapped_scalar_tensor( + @Const @ByRef Scalar scalar, + @Const @ByVal(nullValue = "at::Device(at::kCPU)") Device device); +@Namespace("at::native") public static native @ByVal Tensor wrapped_scalar_tensor( + @Const @ByRef Scalar scalar); + // namespace native + // namespace at -// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor view_copy_symint_outf(@Const @ByRef Tensor self, @ByVal SymIntRef size, @ByRef Tensor out); +// Parsed from c10/util/strides.h -// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor view_copy_out(@ByRef Tensor out, @Const @ByRef Tensor self, ScalarType dtype); -// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor view_copy_outf(@Const @ByRef Tensor self, ScalarType dtype, @ByRef Tensor out); +// #pragma once +// #include +// #include +// Computes the contiguous strides of a tensor, given its sizes. +@Namespace("c10") public static native @ByVal @Cast("c10::DimVector*") SymDimVector contiguous_strides(@Const @ByVal LongArrayRef sizes); +@Namespace("c10") public static native @ByVal @Cast("c10::DimVector*") SymDimVector contiguous_strides(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... sizes); + // namespace c10 -// Parsed from ATen/ops/vsplit.h +// Parsed from ATen/TensorMeta.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include // #include -// #include -// #include - +// #include +// #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy-dtor") +// #endif -// #include +// Use this to define the prototype for a meta function. There are two +// versions; one that takes one argument (just the operator name), or FUNC2 +// variant that takes two arguments (operator name and overload name). +// +// Example usage: +// +// TORCH_META_FUNC2(add, Tensor) ( +// const Tensor& self, const Tensor& other +// ) { +// ... compute sizes and options ... 
+// set_output(sizes, options); +// } +// +// #define TORCH_META_FUNC(name) void structured_##name::meta +// #define TORCH_META_FUNC2(name, overload) +// void structured_##name##_##overload::meta + +// These are versions of TORCH_META_FUNC(2) that include a precompute_out struct +// as a return value. They should be used when the kernel in question has +// precomputed values declared in native_functions.yaml and the corresponding +// implementation should return an instance of the aforementioned struct. +// #define TORCH_PRECOMPUTE_META_FUNC(name) +// structured_##name::meta_return_ty structured_##name::meta +// #define TORCH_PRECOMPUTE_META_FUNC2(name, overload) +// structured_##name##_##overload::meta_return_ty +// structured_##name##_##overload::meta + +// Use this to create a precompute struct in a meta function. +// #define TORCH_PRECOMPUTE_STRUCT(name) structured_##name::precompute_out<> +// #define TORCH_PRECOMPUTE_STRUCT2(name, overload) +// structured_##name##_##overload::precompute_out<> + +// Use this to define the prototype for an implementation. This takes only +// one argument, which is the name of the dispatch key entry you're +// implementing. +// +// Example usage: +// +// TORCH_IMPL_FUNC(add_cpu) ( +// Tensor& result, const Tensor& self, const Tensor& other +// ) { +// ... do the actual implementation ... +// } +// +// #define TORCH_IMPL_FUNC(name) void structured_##name::impl +// Targeting ../MetaBase.java -// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @Cast("int64_t") long sections); -// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @ByVal @Cast("c10::ArrayRef*") LongArrayRef indices); -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector vsplit(@Const @ByRef Tensor self, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... indices); + // namespace impl + // namespace at -// Parsed from ATen/ops/vstack.h +// Parsed from ATen/core/Range.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include -// #include + // namespace at -// aten::vstack(Tensor[] tensors) -> Tensor -@Namespace("at") public static native @ByVal Tensor vstack(@ByVal TensorArrayRef tensors); +// Parsed from c10/util/Load.h -// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor vstack_out(@ByRef Tensor out, @ByVal TensorArrayRef tensors); -// aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor vstack_outf(@ByVal TensorArrayRef tensors, @ByRef Tensor out); +// #pragma once +// #include +// #include + // namespace detail + // namespace c10 -// Parsed from ATen/ops/where.h +// Parsed from c10/core/DynamicCast.h // #pragma once -// @generated by torchgen/gen.py from Function.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - +// #include +// #include +// #include +// #include +// Dynamic type casting utils: +// - fetch_and_cast +// - cast_and_store +// +// fetch_and_cast fetch a value with dynamic type specified by a ScalarType +// from a void pointer and cast it to a static type. +// +// cast_and_store casts a static typed value into dynamic type specified +// by a ScalarType, and store it into a void pointer. +// +// NOTE: +// +// Dynamic casting allows us to support type promotion without blowing up +// the combination space: For example, without dynamic cast, in order to +// implement `add_` with type promotion, we would need something like +// +// AT_DISPATCH_ALL_TYPES(output.dtype(), +// AT_DISPATCH_ALL_TYPES(input1.dtype(), +// AT_DISPATCH_ALL_TYPES(input2.dtype(), +// [](arg0_t a, arg1_t b) -> out_t { return a + b; } +// ) +// ) +// ) +// +// If we support N dtypes, the above code would generate the a+b kernel for +// all the N * N * N different supported types, the compilation time and +// binary size would become horrible. +// +// Dynamic casting might sounds like a bad idea in terms of performance. +// Especially if you ever do it in a loop, you are going to do a billion tests. +// But in practice it is not as bad as it might look: +// +// - on CPU, this is a branch that always has the same outcome, therefore +// hopefully the branch predictor could do the job pretty well +// - on GPU, these branches will not diverge, so we could still have the same +// warp executing the same line of code +// - Most kernels, like `add`, are bandwidth bound, adding a few clock cycles to +// check an integer does not hurt the performance much because the ALUs would +// wait for load instructions anyway. +// +// For the discussion and benchmark, refer to: +// - https://github.com/pytorch/pytorch/pull/28343 +// - https://github.com/pytorch/pytorch/pull/28344 +// - https://github.com/pytorch/pytorch/pull/28345 +// -// #include +// #ifdef C10_HOST_DEVICE +// #else +// #define ERROR_UNSUPPORTED_CAST TORCH_CHECK(false, "Unexpected scalar type"); +// #endif +// Fetch a value with dynamic type src_type from ptr, and cast it to static type +// dest_t. 
+// #define FETCH_AND_CAST_CASE(type, scalartype) +// case ScalarType::scalartype: +// return c10::convert(c10::load(ptr)); + +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") qint8 fetch_and_cast_to_qint8( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") quint8 fetch_and_cast_to_quint8( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") qint32 fetch_and_cast_to_quint32( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") quint4x2 fetch_and_cast_to_quint4x2( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") quint2x4 fetch_and_cast_to_quint2x4( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @Name("fetch_and_cast") byte fetch_and_cast_to_byte( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @Name("fetch_and_cast") short fetch_and_cast_to_short( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @Name("fetch_and_cast") int fetch_and_cast_to_int( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @Cast("int64_t") @Name("fetch_and_cast") long fetch_and_cast_to_long( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") Half fetch_and_cast_to_Half( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @Name("fetch_and_cast") float fetch_and_cast_to_float( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @Name("fetch_and_cast") double fetch_and_cast_to_double( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @Cast("bool") @Name("fetch_and_cast") boolean fetch_and_cast_to_boolean( + ScalarType src_type, + @Const Pointer ptr); + +@Namespace("c10") public static native @ByVal @Name("fetch_and_cast") BFloat16 fetch_and_cast_to_BFload16( + ScalarType src_type, + @Const Pointer ptr); + +// Cast a value with static type src_t into dynamic dest_type, and store it to +// ptr. 
+// #define CAST_AND_STORE_CASE(type, scalartype) +// case ScalarType::scalartype: +// *(type*)ptr = c10::convert(value); +// return; +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_qint8( + ScalarType dest_type, + Pointer ptr, + @ByVal qint8 value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_quint8( + ScalarType dest_type, + Pointer ptr, + @ByVal quint8 value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_quint32( + ScalarType dest_type, + Pointer ptr, + @ByVal qint32 value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_quint4x2( + ScalarType dest_type, + Pointer ptr, + @ByVal quint4x2 value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_quint2x4( + ScalarType dest_type, + Pointer ptr, + @ByVal quint2x4 value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_byte( + ScalarType dest_type, + Pointer ptr, + byte value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_short( + ScalarType dest_type, + Pointer ptr, + short value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_int( + ScalarType dest_type, + Pointer ptr, + int value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_long( + ScalarType dest_type, + Pointer ptr, + @Cast("int64_t") long value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_Half( + ScalarType dest_type, + Pointer ptr, + @ByVal Half value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_float( + ScalarType dest_type, + Pointer ptr, + float value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_double( + ScalarType dest_type, + Pointer ptr, + double value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_qint8( + ScalarType dest_type, + Pointer ptr, + @ByVal FloatComplex value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_qint8( + ScalarType dest_type, + Pointer ptr, + @ByVal DoubleComplex value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_boolean( + ScalarType dest_type, + Pointer ptr, + @Cast("bool") boolean value); +@Namespace("c10") public static native @Name("cast_and_store") void cast_and_store_from_BFload16( + ScalarType dest_type, + Pointer ptr, + @ByVal BFloat16 value); -// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor where(@Const @ByRef Tensor condition, @Const @ByRef Tensor self, @Const @ByRef Tensor other); +// #define DEFINE_UNCASTABLE(T, scalartype_) +// template <> +// C10_HOST_DEVICE inline T fetch_and_cast( +// const ScalarType src_type, const void* ptr) { +// CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type); +// return c10::load(ptr); +// } +// template <> +// C10_HOST_DEVICE inline void cast_and_store( +// const ScalarType dest_type, void* ptr, T value) { +// CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type); +// *(T*)ptr = value; +// } -// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor where_out(@ByRef Tensor out, @Const @ByRef Tensor condition, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor where_outf(@Const @ByRef Tensor condition, @Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// #undef FETCH_AND_CAST_CASE +// #undef CAST_AND_STORE_CASE +// #undef DEFINE_UNCASTABLE +// #undef ERROR_UNSUPPORTED_CAST -// aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor where(@Const @ByRef Tensor condition, @Const @ByRef Scalar self, @Const @ByRef Tensor other); + // namespace c10 -// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor where(@Const @ByRef Tensor condition, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor where(@Const @ByRef Tensor condition, @Const @ByRef Scalar self, @Const @ByRef Scalar other); +// Parsed from ATen/TensorIterator.h -// aten::where(Tensor condition) -> Tensor[] -@Namespace("at") public static native @Cast({"", "std::vector"}) @StdMove TensorVector where(@Const @ByRef Tensor condition); +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +// #endif +// #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy-dtor") +// #endif + // namespace at -// Parsed from ATen/ops/xlogy.h +// TensorIterator is a helper class for element-wise operations, such as +// arithmetic, comparisons, and trigonometric functions. It handles +// broadcasting and type conversions of operands. +// +// This is inspired by NumPy's Array Iterator API (NpyIter). +// +// The files Loops.h and Loops.cuh provide functions to build kernels that +// use TensorIterator. +// +// Example: +// +// auto iter = TensorIteratorConfig() +// .add_output(output) +// .add_input(input) +// .build() +// +// [MyKernel.cpp / MyKernel.cu] +// cpu_kernel(iter, [](float a, float b) { +// return a + b; +// }); +// +// gpu_kernel(iter, []GPU_LAMBDA(float a, float b) -> float { +// return a + b; +// }); +// +// Note [Order of Construction] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// When setting up the tensor iterator configuration, the output Tensors +// have to be added first via +// TensorIteratorConfig::add_owned_output(at::Tensor). After adding all outputs, +// the inputs can be added via +// TensorIteratorConfig::add_owned_input(at::Tensor). +// Adding another output after inputs have been added will rise an exception. +// +// Note [Common Dtype Computation] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Some operations have a natural notion of a "common dtype" or +// "computation dtype" where all inputs are cast to one dtype, the +// operation is performed, and then the results are cast to all outputs. +// +// TensorIterator infers a common dtype if all inputs have the same dtype, +// and it computes one using type promotion rules on its inputs if +// promote_inputs_to_common_dtype_ is true. Attempting to query +// a common dtype otherwise will throw an exception. 
+// +// Note that the outputs are not considered when computing a common dtype. +// This parameter is heuristically chosen to determine the minimum number of +// work that warrants parallelism. For example, when summing an array, it is +// deemed inefficient to parallelise over arrays shorter than 32768. Further, +// no parallel algorithm (such as parallel_reduce) should split work into +// smaller than GRAIN_SIZE chunks. +@Namespace("at::internal") @MemberGetter public static native @Cast("const int64_t") long GRAIN_SIZE(); +// Targeting ../OpaqueOptionalTensorRef.java -// #pragma once -// @generated by torchgen/gen.py from Function.h -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../OperandInfo.java -// #include +@Namespace("at") public enum FastSetupType { + NONE((byte)(0)), + CONTIGUOUS((byte)(1)), + CHANNELS_LAST((byte)(2)), + NON_OVERLAPPING_DENSE((byte)(3)); + public final byte value; + private FastSetupType(byte v) { this.value = v; } + private FastSetupType(FastSetupType e) { this.value = e.value; } + public FastSetupType intern() { for (FastSetupType e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../TensorIteratorBase.java -// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor xlogy(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor -@Namespace("at") public static native @ByVal Tensor xlogy(@Const @ByRef Scalar self, @Const @ByRef Tensor other); +// Targeting ../TensorIterator.java -// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor xlogy(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor xlogy_(@ByRef Tensor self, @Const @ByRef Tensor other); +// Targeting ../TensorIteratorConfig.java -// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor xlogy_(@ByRef Tensor self, @Const @ByRef Scalar other); -// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor xlogy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Tensor other); -// aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor xlogy_outf(@Const @ByRef Tensor self, @Const @ByRef Tensor other, @ByRef Tensor out); +// Targeting ../SplitUntil32Bit.java -// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor xlogy_out(@ByRef Tensor out, @Const @ByRef Scalar self, @Const @ByRef Tensor other); -// aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor xlogy_outf(@Const @ByRef Scalar self, @Const @ByRef Tensor other, @ByRef Tensor out); -// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor xlogy_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor xlogy_outf(@Const @ByRef Tensor self, @Const @ByRef Scalar other, @ByRef Tensor out); + // namespace at -// Parsed from ATen/ops/xor.h +// Parsed from ATen/NativeFunctions.h // #pragma once -// @generated by torchgen/gen.py from Function.h +// @generated by torchgen/gen.py from NativeFunctions.h + +// #ifdef TORCH_ASSERT_NO_OPERATORS +// #error This change adds a dependency on native_functions.yaml, +// meaning the file will need to be re-compiled every time an operator +// is changed or added. Consider if your change would be better placed in +// another file, or if a more specific header might achieve the same goal. +// See NOTE: [Tensor vs. TensorBase] +// #endif + +// #if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +// #error This change adds a dependency on all pytorch operators, meaning the +// file will need to be re-compiled every time an operator is changed or added. +// Consider including a specific operator from +// and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +// #endif -// #include -// #include -// #include -// #include -// #include -// #include -// #include // #include // #include // #include // #include // #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include 
+// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// 
#include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// 
#include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// 
#include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// #include - -// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor -@Namespace("at") public static native @ByVal Tensor __xor__(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// aten::__xor__.Tensor(Tensor self, Tensor 
other) -> Tensor -@Namespace("at") public static native @ByVal Tensor __xor__(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +// Parsed from ATen/TensorIndexing.h +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #ifndef AT_PER_OPERATOR_HEADERS +// #include +// #include +// #else +// #include +// #include +// #include +// #include +// #endif -// Parsed from ATen/ops/zero.h +// #include -// #pragma once +// #include -// @generated by torchgen/gen.py from Function.h +@Namespace("at::indexing") @MemberGetter public static native @Cast("const int64_t") long INDEX_MIN(); +@Namespace("at::indexing") @MemberGetter public static native @Cast("const int64_t") long INDEX_MAX(); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("at::indexing") public enum TensorIndexType { None(0), Ellipsis(1), Integer(2), Boolean(3), Slice(4), Tensor(5); + public final int value; + private TensorIndexType(int v) { this.value = v; } + private TensorIndexType(TensorIndexType e) { this.value = e.value; } + public TensorIndexType intern() { for (TensorIndexType e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +@Namespace("at::indexing") @MemberGetter public static native @ByRef @Cast("const c10::nullopt_t*") Pointer None(); +// Targeting ../EllipsisIndexType.java -// #include +@Namespace("at::indexing") @MemberGetter public static native @Const @ByRef EllipsisIndexType Ellipsis(); +// Targeting ../Slice.java -// aten::zero_(Tensor(a!) self) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor zero_(@ByRef Tensor self); -// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor zero_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
-@Namespace("at") public static native @ByRef Tensor zero_outf(@Const @ByRef Tensor self, @ByRef Tensor out); -// aten::zero(Tensor self) -> Tensor -@Namespace("at") public static native @ByVal @Name("zero") Tensor _zero(@Const @ByRef Tensor self); +@Namespace("at::indexing") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef Slice slice); +// Targeting ../TensorIndex.java +@Namespace("at::indexing") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + @Const @ByRef TensorIndex tensor_index); +@Namespace("at::indexing") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + @Const @ByRef TensorIndexVector tensor_indices); +@Namespace("at::indexing::impl") public static native @ByVal Tensor applySlice( + @Const @ByRef Tensor self, + @Cast("int64_t") long dim, + @ByVal SymInt start, + @ByVal SymInt stop, + @ByVal SymInt step, + @Cast("bool") boolean disable_slice_optimization, + @Const @ByRef Device self_device, + @Const @ByRef SymIntArrayRefOptional self_sizes); -// Parsed from ATen/ops/zeros.h +@Namespace("at::indexing::impl") public static native @ByVal Tensor applySelect( + @Const @ByRef Tensor self, + @Cast("int64_t") long dim, + @Cast("int64_t") long index, + @Cast("int64_t") long real_dim, + @Const @ByRef Device arg4, + @Const @ByRef SymIntArrayRefOptional self_sizes); -// #pragma once +@Namespace("at::indexing::impl") public static native @ByVal Tensor boolToIndexingTensorCPUOrCUDA( + @Const @ByRef Tensor self, + @Cast("bool") boolean value); -// @generated by torchgen/gen.py from Function.h +@Namespace("at::indexing::impl") public static native @ByVal Tensor boolToIndexingTensorNonNativeDeviceType( + @Const @ByRef Tensor self, + @Cast("bool") boolean value); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("at::indexing::impl") public static native @ByVal Tensor boolToIndexingTensor( + @Const @ByRef Tensor self, + @Cast("bool") boolean value, + @Const @ByRef Device self_device); +@Namespace("at::indexing::impl") public static native @ByVal Tensor scalarToTensorNonNativeDeviceType( + @Const @ByRef Scalar v, + @Const @ByRef TensorOptions options); +@Namespace("at::indexing::impl") public static native void recordTensorIndex( + @Const @ByRef Tensor tensor, + @ByRef TensorVector outIndices, + @Cast("int64_t*") LongPointer dim_ptr); +@Namespace("at::indexing::impl") public static native void recordTensorIndex( + @Const @ByRef Tensor tensor, + @ByRef TensorVector outIndices, + @Cast("int64_t*") LongBuffer dim_ptr); +@Namespace("at::indexing::impl") public static native void recordTensorIndex( + @Const @ByRef Tensor tensor, + @ByRef TensorVector outIndices, + @Cast("int64_t*") long[] dim_ptr); -// #include +@Namespace("at::indexing::impl") public static native @ByVal TensorOptionalList typeConvertIndices( + @Const @ByRef Tensor arg0, + @Cast({"", "std::vector"}) @StdMove TensorVector indices); +// NOTE: Why do we mirror instead of replace the `count_specified_dimensions` +// function in torch/csrc/autograd/python_variable_indexing.cpp? It's because +// `count_specified_dimensions` is on the hot path of Python tensor multi-dim +// indexing (i.e. 
it's called by `applySlicing` which is called by +// `THPVariable_getitem` / `THPVariable_setitem` when handling indexing of more +// than one dimension). If we were to merge the Python/C++ +// `count_specified_dimensions` function, on the Python side we would have to +// construct a `std::vector` container to be consumed by the C++ +// `count_specified_dimensions` function, which adds 100s of nanoseconds +// overhead and is undesirable. +@Namespace("at::indexing::impl") public static native @Cast("int64_t") long count_specified_dimensions( + @Const @ByRef TensorIndexArrayRef indices); + // namespace impl -// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -// aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// NOTE: Many functions below are only for consumption from Python indexing +// implementation, they include: +// +// - `Tensor scalarToTensor(...)` +// - `IntArrayRef slicePrefix1sSize(...)` +// - `void copy_to(...)` +// - `Tensor handleDimInMultiDimIndexing(...)` +// - `Tensor dispatch_index(...)` +// - `Tensor dispatch_index_put_(...)` +// - `Tensor get_item(...)` +// - `void set_item(...)` +// +// The rest of the functions are in `at::indexing::impl` namespace, signifying +// that they shouldn't be used from Python indexing implementation. +@Namespace("at::indexing") public static native @ByVal Tensor scalarToTensor( + @Const @ByRef Scalar v, + @Const @ByRef TensorOptions options, + @Const @ByRef Device self_device); -// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +// To match numpy semantics: +// As a special case for backwards compatibility, +// strip away unit dimensions from the left of 'src' +@Namespace("at::indexing") public static native @ByVal SymIntArrayRef slicePrefix1sSize(@Const @ByRef SymIntArrayRef sizes); +@Namespace("at::indexing") public static native void copy_to(@Const @ByRef Tensor dst, @Const @ByRef Tensor src); -// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); -@Namespace("at") public static native @ByVal Tensor zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory); +// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor +// indexing functions from Python ] +@Namespace("at::indexing") public static native @ByVal Tensor handleDimInMultiDimIndexing( + @Const @ByRef Tensor prev_dim_result, + @Const @ByRef Tensor original_tensor, + @Const @ByRef TensorIndex index, + @Cast("int64_t*") LongPointer dim_ptr, + @Cast("int64_t*") LongPointer specified_dims_ptr, + @Cast("int64_t") long real_dim, + @ByRef TensorVector outIndices, + @Cast("bool") boolean disable_slice_optimization, + @Const @ByRef Device original_tensor_device, + @Const @ByRef SymIntArrayRefOptional prev_dim_result_sizes); +@Namespace("at::indexing") public static native @ByVal Tensor handleDimInMultiDimIndexing( + @Const @ByRef Tensor prev_dim_result, + @Const @ByRef Tensor original_tensor, + @Const @ByRef TensorIndex index, + @Cast("int64_t*") LongBuffer dim_ptr, + @Cast("int64_t*") LongBuffer specified_dims_ptr, + @Cast("int64_t") long real_dim, + @ByRef TensorVector outIndices, + @Cast("bool") boolean disable_slice_optimization, + @Const @ByRef Device original_tensor_device, + @Const @ByRef SymIntArrayRefOptional prev_dim_result_sizes); +@Namespace("at::indexing") public static native @ByVal Tensor handleDimInMultiDimIndexing( + @Const @ByRef Tensor prev_dim_result, + @Const @ByRef Tensor original_tensor, + @Const @ByRef TensorIndex index, + @Cast("int64_t*") long[] dim_ptr, + @Cast("int64_t*") long[] specified_dims_ptr, + @Cast("int64_t") long real_dim, + @ByRef TensorVector outIndices, + @Cast("bool") boolean disable_slice_optimization, + @Const @ByRef Device original_tensor_device, + @Const @ByRef SymIntArrayRefOptional prev_dim_result_sizes); +// This mirrors `applySlicing` in +// torch/csrc/autograd/python_variable_indexing.cpp +@Namespace("at::indexing::impl") public static native @ByVal Tensor applySlicing( + @Const 
@ByRef Tensor self, + @Const @ByRef TensorIndexArrayRef indices, + @ByRef TensorVector outIndices, + @Cast("bool") boolean disable_slice_optimization, + @Const @ByRef Device self_device, + @Const @ByRef SymIntArrayRefOptional self_sizes); + // namespace impl +@Namespace("at::indexing") public static native @ByVal Tensor dispatch_index( + @Const @ByRef Tensor self, + @Cast({"", "std::vector"}) @StdMove TensorVector indices); -// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor zeros_symint(@ByVal SymIntRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("at") public static native @ByVal Tensor zeros_symint(@ByVal SymIntRef size); +@Namespace("at::indexing") public static native @ByVal Tensor dispatch_index_put_( + @ByRef Tensor self, + @Cast({"", "std::vector"}) @StdMove TensorVector indices, + @Const @ByRef Tensor value); +// NOTE [ Setting `disable_slice_optimization` when calling C++ tensor indexing +// functions from Python ] +// +// Question: When should we set `disable_slice_optimization` to `true` when +// calling C++ tensor indexing functions from Python indexing code? +// +// Answer: What "slice optimization" means: when we have a slicing expression +// like `x[0:5, 0]`, where the sliced tensor was of size 5 in dimension 0, we +// would skip dispatching the actual slice call as an optimization. However, +// here are the cases where we DON'T want this optimization: +// +// 1. When we are doing 1-D slicing (e.g. `tensor[:]`). +// Reason: we always return a shallow copy for expressions such as +// `tensor[:]` / `tensor[...]` / `tensor[:, :]`. (Note that for `tensor[:, +// :]`, we return an alias of `tensor` by doing the following: +// ``` +// Tensor sliced = impl::applySlicing(self, indices, tensorIndices, +// disable_slice_optimization, self_device, self_sizes); if +// (tensorIndices.empty()) { +// if (sliced.is_same(self)) { +// // ensure we return a shallow copy for things like x[...] +// sliced = at::alias(sliced); +// } +// return sliced; +// } +// ```) +// 2. When we are doing JIT tracing. +// Reason: JIT tracing needs the `self.slice(...)` call to properly trace the +// slice operation. -// aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor zeros_symint(@ByVal SymIntRef size, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory);
+// This mirrors `THPVariable_getitem` in
+// torch/csrc/autograd/python_variable_indexing.cpp See NOTE [ Setting
+// `disable_slice_optimization` when calling C++ tensor indexing functions from
+// Python ]
+@Namespace("at::indexing") public static native @ByVal Tensor get_item(
+ @Const @ByRef Tensor self,
+ @Const @ByRef TensorIndexArrayRef indices,
+ @Cast("bool") boolean disable_slice_optimization/*=false*/);
+@Namespace("at::indexing") public static native @ByVal Tensor get_item(
+ @Const @ByRef Tensor self,
+ @Const @ByRef TensorIndexArrayRef indices);
+// This mirrors `THPVariable_setitem` in
+// torch/csrc/autograd/python_variable_indexing.cpp for "the assigned value is a
+// Tensor" case See NOTE [ Setting `disable_slice_optimization` when calling C++
+// tensor indexing functions from Python ]
+@Namespace("at::indexing") public static native void set_item(
+ @Const @ByRef Tensor self,
+ @Const @ByRef TensorIndexArrayRef indices,
+ @Const @ByRef Tensor value,
+ @Cast("bool") boolean disable_slice_optimization/*=false*/);
+@Namespace("at::indexing") public static native void set_item(
+ @Const @ByRef Tensor self,
+ @Const @ByRef TensorIndexArrayRef indices,
+ @Const @ByRef Tensor value);
-// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size);
-@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size);
+ // namespace indexing
+ // namespace at
-// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByRef Tensor out);
-@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByRef Tensor out);
+// Parsed from ATen/TensorOperators.h
+// #pragma once
-// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor zeros_symint_out(@ByRef Tensor out, @ByVal SymIntRef size);
+// #include
+// #include
+// #ifndef AT_PER_OPERATOR_HEADERS
+// #include
+// #else
+// #include
+// #endif
-// aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
-@Namespace("at") public static native @ByRef Tensor zeros_symint_outf(@ByVal SymIntRef size, @ByRef Tensor out);
+// #include
+// #include
+// #define AT_FORALL_BINARY_OPS(_)
+// _(+, x.add(y), y.add(x))
+// _(*, x.mul(y), y.mul(x))
+// _(-,
+// x.sub(y),
+// ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).sub_(y))
+// _(/,
+// x.div(y),
+// ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).div_(y))
+// _(%,
+// x.remainder(y),
+// ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).remainder_(y))
+// _(&, x.bitwise_and(y), y.bitwise_and(x))
+// _(|, x.bitwise_or(y), y.bitwise_or(x))
+// _(^, x.bitwise_xor(y), y.bitwise_xor(x))
+// _(<, x.lt(y), y.gt(x))
+// _(<=, x.le(y), y.ge(x))
+// _(>, x.gt(y), y.lt(x))
+// _(>=, x.ge(y), y.le(x))
+// _(==, x.eq(y), y.eq(x))
+// _(!=, x.ne(y), y.ne(x))
-// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) 
out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("at") public static native @ByRef Tensor zeros_out(@ByRef Tensor out, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -// aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByRef Tensor out); -@Namespace("at") public static native @ByRef Tensor zeros_outf(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByRef Tensor out); +// #define DEFINE_OPERATOR(op, body, reverse_scalar_body) +// static inline Tensor operator op(const Tensor& x, const Tensor& y) { +// return body; +// } +// static inline Tensor operator op(const Tensor& x, const Scalar& y) { +// return body; +// } +// static inline Tensor operator op(const Scalar& x, const Tensor& y) { +// return reverse_scalar_body; +// } + @Namespace("at") public static native @ByVal @Name("operator +") Tensor add(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator *") Tensor multiply(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator -") Tensor subtract(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator /") Tensor divide(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator %") Tensor mod(@Const @ByRef Tensor x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator %") Tensor mod(@Const @ByRef Tensor x, @Const @ByRef Scalar y); + @Namespace("at") public static native @ByVal @Name("operator %") Tensor mod(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator &") Tensor and(@Const @ByRef Tensor x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator &") Tensor and(@Const @ByRef Tensor x, @Const @ByRef Scalar y); + @Namespace("at") public static native @ByVal @Name("operator &") Tensor and(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator |") Tensor or(@Const @ByRef Tensor x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator |") Tensor or(@Const @ByRef Tensor x, @Const @ByRef Scalar y); + @Namespace("at") public static native @ByVal @Name("operator |") Tensor or(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator ^") Tensor xor(@Const @ByRef Tensor x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator ^") Tensor xor(@Const @ByRef Tensor x, @Const @ByRef Scalar y); + @Namespace("at") public static native @ByVal @Name("operator ^") Tensor xor(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator <") Tensor lessThan(@Const @ByRef Tensor x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator <") Tensor lessThan(@Const @ByRef Tensor x, @Const @ByRef Scalar y); + @Namespace("at") public static native 
@ByVal @Name("operator <") Tensor lessThan(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator <=") Tensor lessThanEquals(@Const @ByRef Tensor x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator <=") Tensor lessThanEquals(@Const @ByRef Tensor x, @Const @ByRef Scalar y); + @Namespace("at") public static native @ByVal @Name("operator <=") Tensor lessThanEquals(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator >") Tensor greaterThan(@Const @ByRef Tensor x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator >") Tensor greaterThan(@Const @ByRef Tensor x, @Const @ByRef Scalar y); + @Namespace("at") public static native @ByVal @Name("operator >") Tensor greaterThan(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator >=") Tensor greaterThanEquals(@Const @ByRef Tensor x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator >=") Tensor greaterThanEquals(@Const @ByRef Tensor x, @Const @ByRef Scalar y); + @Namespace("at") public static native @ByVal @Name("operator >=") Tensor greaterThanEquals(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator ==") Tensor equals(@Const @ByRef Tensor x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator ==") Tensor equals(@Const @ByRef Tensor x, @Const @ByRef Scalar y); + @Namespace("at") public static native @ByVal @Name("operator ==") Tensor equals(@Const @ByRef Scalar x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator !=") Tensor notEquals(@Const @ByRef Tensor x, @Const @ByRef Tensor y); + @Namespace("at") public static native @ByVal @Name("operator !=") Tensor notEquals(@Const @ByRef Tensor x, @Const @ByRef Scalar y); + @Namespace("at") public static native @ByVal @Name("operator !=") Tensor notEquals(@Const @ByRef Scalar x, @Const @ByRef Tensor y); +// #undef DEFINE_OPERATOR +// #undef AT_FORALL_BINARY_OPS + // namespace at +// Parsed from ATen/Version.h -// Parsed from ATen/ops/zeros_like.h +// #include -// #pragma once +/** Returns a detailed string describing the configuration PyTorch. */ +@Namespace("at") public static native @StdString BytePointer show_config(); -// @generated by torchgen/gen.py from Function.h +@Namespace("at") public static native @StdString BytePointer get_mkl_version(); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("at") public static native @StdString BytePointer get_mkldnn_version(); +@Namespace("at") public static native @StdString BytePointer get_openmp_version(); +@Namespace("at") public static native @StdString BytePointer get_cxx_flags(); -// #include + // namespace at -// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor zeros_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByVal Tensor zeros_like(@Const @ByRef Tensor self); -// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor -@Namespace("at") public static native @ByVal Tensor zeros_like(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByVal LayoutOptional layout, @ByVal DeviceOptional device, @ByVal BoolOptional pin_memory, @ByVal MemoryFormatOptional memory_format); +// Parsed from ATen/core/Scalar.h -// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor zeros_like_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("at") public static native @ByRef Tensor zeros_like_out(@ByRef Tensor out, @Const @ByRef Tensor self); -// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) -@Namespace("at") public static native @ByRef Tensor zeros_like_outf(@Const @ByRef Tensor self, @ByVal MemoryFormatOptional memory_format, @ByRef Tensor out); +// #include +// Parsed from ATen/core/UnsafeFromTH.h +// #pragma once +// #include -// Parsed from torch/autograd.h +@Namespace("at") public static native @ByVal Tensor unsafeTensorFromTH(Pointer th_pointer, @Cast("bool") boolean retain); -// #pragma once +@Namespace("at") public static native @Cast({"", "c10::Storage&&"}) @StdMove Storage unsafeStorageFromTH(Pointer th_pointer, @Cast("bool") boolean retain); -// #include -// #include -// #include -// Parsed from torch/script.h + +// Parsed from ATen/ATen.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #if !defined(_MSC_VER) && __cplusplus < 201402L +// #error C++14 or later compatible compiler is required to use ATen. +// #endif -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// TODO: try to remove this +// There is some back story, see https://github.com/pytorch/pytorch/issues/48684 +// #include -// Parsed from torch/csrc/Export.h +// Parsed from torch/csrc/api/include/torch/detail/TensorDataContainer.h // #pragma once -// #include +// #include +// #include +// #include -// #ifdef THP_BUILD_MAIN_LIB -// #define TORCH_PYTHON_API C10_EXPORT +// #include + +// #ifndef AT_PER_OPERATOR_HEADERS +// #include // #else -// #define TORCH_PYTHON_API C10_IMPORT +// #include +// #include // #endif +// #include -// Parsed from torch/csrc/onnx/onnx.h - -// #pragma once - -@Namespace("torch::onnx") public enum OperatorExportTypes { - ONNX(0), // Strict ONNX export - ONNX_ATEN(1), // ONNX With ATen op everywhere - ONNX_ATEN_FALLBACK(2), // ONNX export with ATen fallback - ONNX_FALLTHROUGH(3);// Export supported ONNX ops. Pass through unsupported ops. 
- - public final int value; - private OperatorExportTypes(int v) { this.value = v; } - private OperatorExportTypes(OperatorExportTypes e) { this.value = e.value; } - public OperatorExportTypes intern() { for (OperatorExportTypes e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - -@Namespace("torch::onnx") public enum TrainingMode { - EVAL(0), // Inference mode - PRESERVE(1), // Preserve model state (eval/training) - TRAINING(2);// Training mode +@Namespace("torch::detail") public enum TensorDataContainerType { Scalar(0), InitList(1), Tensor(2); public final int value; - private TrainingMode(int v) { this.value = v; } - private TrainingMode(TrainingMode e) { this.value = e.value; } - public TrainingMode intern() { for (TrainingMode e : values()) if (e.value == value) return e; return this; } + private TensorDataContainerType(int v) { this.value = v; } + private TensorDataContainerType(TensorDataContainerType e) { this.value = e.value; } + public TensorDataContainerType intern() { for (TensorDataContainerType e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } -@Namespace("torch::onnx") @MemberGetter public static native @Cast("const char") byte kOnnxNodeNameAttribute(int i); -@Namespace("torch::onnx") @MemberGetter public static native @Cast("const char*") BytePointer kOnnxNodeNameAttribute(); - - // namespace onnx - // namespace torch - +@Namespace("torch::detail") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( + @Cast("std::ostream*") @ByRef Pointer stream, + @Cast("const torch::detail::TensorDataContainer*") @ByRef Pointer tensor_data_container); -// Parsed from torch/csrc/api/include/torch/imethod.h +// FIXME: There is no `operator<<` overload for `at::kBFloat16` type, +// and we need to convert it to `float` type using `operator float()` function +// defined in `c10/util/BFloat16.h`. +// Tracking issue: https://github.com/pytorch/pytorch/issues/28845 +@Namespace("torch::detail") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @ByVal BFloat16 value); -// #pragma once -// #include -// #include -// Targeting ../IMethod.java +@Namespace("torch::detail") public static native ScalarType compute_desired_dtype(ScalarType scalar_type); +// We use `TensorDataContainer` to support converting the following data +// container types into the equivalent Tensor: +// +// 1. Arbitrarily nested braced-init-list (e.g. `{{1, 2}, {3, 4}}`). +// 2. `at::ArrayRef` of supported tensor data types. +// 3. `std::vector` of supported tensor data types. +// +// At any time, a `TensorDataContainer` object represents one of the following: +// +// 1. A scalar with value `scalar()` and type `scalar_type()`. +// 2. A Tensor represented in `std::initializer_list` form, +// with value `init_list()`, Tensor scalar type `scalar_type()`, and Tensor +// sizes `sizes()`. +// 3. A Tensor represented in `at::Tensor` form, with value `tensor()`, scalar +// type `scalar_type()`, +// and Tensor sizes `sizes()`. +// +// All the infrastructure here is mostly to support converting an arbitrarily +// nested braced-init-list to the equivalent Tensor successfully. 
Consider the +// following example: +// +// `torch::tensor({{1}, {2}})` +// +// this will call into the `torch::tensor` function: +// +// `at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const +// at::TensorOptions& options = {})` +// +// the compiler will first try to convert `{{1}, {2}}` to `TensorDataContainer` +// type: +// +// `TensorDataContainer({{1}, {2}})` +// +// which matches to the +// `TensorDataContainer(std::initializer_list)` +// constructor, and in an attempt to convert `{1}` and `{2}` to +// `TensorDataContainer`, it calls the following: +// +// `TensorDataContainer({1})` (same call path happens for `{2}`, and we'll just +// focus on `{1}` here) +// +// At this point, theoretically there are two plausible ways for `{1}` to be +// matched to one of the constructors of `TensorDataContainer`: +// +// 1. It can be a list-initialization of a scalar value, thus matching +// `TensorDataContainer(int value)`. +// 2. It can be converted to `std::initializer_list`, thus +// matching +// `TensorDataContainer(std::initializer_list)`. +// +// How does the compiler decide which one to choose? According to +// `https://en.cppreference.com/w/cpp/language/list_initialization`, +// braced-init-list always prefers the constructor that takes +// `std::initializer_list`. Hence we happily move forward with constructor #2, +// and it calls the following: +// +// `TensorDataContainer(1)` +// +// Now it matches `TensorDataContainer(int value)`, which stores `1` as a scalar +// value. All is good. + // namespace detail // namespace torch -// Parsed from torch/csrc/api/include/torch/types.h +// Parsed from torch/csrc/autograd/generated/variable_factories.h // #pragma once -// #include - -// #include +// @generated from ../tools/autograd/templates/variable_factories.h -// #include +// #include +// #include +// #include +// #include +// #include +// #include // #include -// TODO: These don't really belong here but torchvision builds in CI need them -// Remove once the torchvision version being compiled in CI is updated -// #include -// #include - -// NOTE [ Exposing declarations in `at::` to `torch::` ] -// -// The following line `using namespace at;` is responsible for exposing all -// declarations in `at::` namespace to `torch::` namespace. -// -// According to the rules laid out in -// https://en.cppreference.com/w/cpp/language/qualified_lookup, section -// "Namespace members": -// ``` -// Qualified lookup within the scope of a namespace N first considers all -// declarations that are located in N and all declarations that are located in -// the inline namespace members of N (and, transitively, in their inline -// namespace members). If there are no declarations in that set then it -// considers declarations in all namespaces named by using-directives found in N -// and in all transitive inline namespace members of N. -// ``` -// -// This means that if both `at::` and `torch::` namespaces have a function with -// the same signature (e.g. both `at::func()` and `torch::func()` exist), after -// `namespace torch { using namespace at; }`, when we call `torch::func()`, the -// `func()` function defined in `torch::` namespace will always be called, and -// the `func()` function defined in `at::` namespace is always hidden. // NOLINT - -/** Fixed width dtypes. */ - -/** Rust-style short dtypes. 
*/ - // namespace torch - - -// Parsed from torch/csrc/api/include/torch/cuda.h - -// #pragma once +// #ifndef AT_PER_OPERATOR_HEADERS +// #include +// #else +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #endif -// #include +// #include +// #include +// #include -// #include -// #include +/** NOTE: Currently {@code torch::tensor(...)} doesn't support mixed data types + * (i.e. {@code torch::tensor({{bool, 2.0}})} doesn't work). We might be able to + * support it in the future by iterating over all sub-lists to find + * the largest data type that can represent all of the elements, or by using + * variadic templates. + * + * NOTE: C++ {@code torch::tensor} with a floating-point type or an {@code at::ArrayRef} / {@code std::vector} / + * (nested) braced-init-list of floating-point types always produces a tensor of dtype + * {@code torch::get_default_dtype()}, matching Python {@code torch.tensor} behavior. + * + * NOTE: C++ {@code torch::tensor} with an integer type or an {@code at::ArrayRef} / {@code std::vector} / + * (nested) braced-init-list of integer types always produces a tensor of dtype {@code at::kLong} + * (aka. int64_t), matching Python {@code torch.tensor} behavior. + * + * NOTE: The following dtypes are not supported by {@code torch::tensor} currently: + * - {@code unsigned int} + * - {@code unsigned long int} + * - {@code unsigned long long int} + * - {@code long long int} */ +@Namespace("torch") public static native @ByVal Tensor tensor(@ByVal @Cast("torch::detail::TensorDataContainer*") Pointer tensor_data_container, @Const @ByRef(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor tensor(@ByVal @Cast("torch::detail::TensorDataContainer*") Pointer tensor_data_container); -/** Returns the number of CUDA devices available. */ -@Namespace("torch::cuda") public static native @Cast("size_t") @Name("device_count") long cuda_device_count(); +/** A generic deleter function. */ -/** Returns true if at least one CUDA device is available. */ -@Namespace("torch::cuda") public static native @Cast("bool") @Name("is_available") boolean cuda_is_available(); +/** Exposes the given {@code data} as a {@code Tensor} without taking ownership of the + * original data. {@code sizes} should specify the shape of the tensor, {@code strides} the + * stride in each dimension. 
The {@code deleter} function (a + * {@code std::function}) will be called on the {@code data} when the Tensor + * data would normally be deallocated. The {@code TensorOptions} specify additional + * configuration options for the returned tensor, such as what type to + * interpret the {@code data} as. */ +@Namespace("torch") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @Const @ByRef PointerConsumer deleter, + @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByRef @Cast("void(*)(void*)") Pointer deleter, + @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @ByRef @Cast("void(*)(void*)") long deleter, + @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @Const @ByRef PointerConsumer deleter, + @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal LongArrayRef sizes, + @ByVal LongArrayRef strides, + @ByRef @Cast("void(*)(void*)") Pointer deleter, + @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor from_blob( + Pointer data, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, + @ByRef @Cast("void(*)(void*)") long deleter, + @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); -/** Returns true if CUDA is available, and CuDNN is available. */ -@Namespace("torch::cuda") public static native @Cast("bool") boolean cudnn_is_available(); +/** Exposes the given {@code data} as a {@code Tensor} without taking ownership of the + * original data. {@code sizes} should specify the shape of the tensor, {@code strides} the + * stride in each dimension. The {@code TensorOptions} + * specify additional configuration options for the returned tensor, such as + * what type to interpret the {@code data} as. */ -/** Sets the seed for the current GPU. */ -@Namespace("torch::cuda") public static native @Name("manual_seed") void cuda_manual_seed(@Cast("uint64_t") long seed); +/** Exposes the given {@code data} as a {@code Tensor} without taking ownership of the + * original data. {@code sizes} should specify the shape of the tensor. The {@code deleter} + * (a {@code std::function}) function will be called on the {@code data} when + * the Tensor data would normally be deallocated. The {@code TensorOptions} specify + * additional configuration options for the returned tensor, such as what type + * to interpret the {@code data} as. */ -/** Sets the seed for all available GPUs. 
*/ -@Namespace("torch::cuda") public static native @Name("manual_seed_all") void cuda_manual_seed_all(@Cast("uint64_t") long seed); +/** Exposes the given {@code data} as a {@code Tensor} without taking ownership of the + * original data. {@code sizes} should specify the shape of the tensor. The + * {@code TensorOptions} specify additional configuration options for the returned + * tensor, such as what type to interpret the {@code data} as. */ -/** Waits for all kernels in all streams on a CUDA device to complete. */ -@Namespace("torch::cuda") public static native @Name("synchronize") void cuda_synchronize(@Cast("int64_t") long device_index/*=-1*/); -@Namespace("torch::cuda") public static native @Name("synchronize") void cuda_synchronize(); +@Namespace("torch") public static native @ByVal @Name("_cudnn_init_dropout_state") Tensor torch__cudnn_init_dropout_state(double dropout, @Cast("bool") boolean train, @Cast("int64_t") long dropout_seed, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar end); +@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end); +@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step); +@Namespace("torch") public static native @ByVal @Name("bartlett_window") Tensor torch_bartlett_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("bartlett_window") Tensor torch_bartlett_window(@Cast("int64_t") long window_length); +@Namespace("torch") public static native @ByVal @Name("bartlett_window") Tensor torch_bartlett_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("bartlett_window") Tensor torch_bartlett_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +@Namespace("torch") public static native @ByVal @Name("blackman_window") Tensor torch_blackman_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("blackman_window") Tensor torch_blackman_window(@Cast("int64_t") long window_length); +@Namespace("torch") public static native @ByVal @Name("blackman_window") Tensor torch_blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("blackman_window") Tensor torch_blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); 
+@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis); +@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis); +@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal LongArrayRef size, @Const @ByRef Tensor qtensor); +@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor); +@Namespace("torch") public static native @ByVal @Name("empty_like") Tensor torch_empty_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("empty_like") Tensor torch_empty_like(@Const @ByRef Tensor self); +@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal LongArrayRef size, @ByVal LongArrayRef stride); +@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", 
"std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); +@Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n); +@Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n, @Cast("int64_t") long m); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal LongArrayRef size, @Const @ByRef Scalar fill_value); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); +@Namespace("torch") public static native @ByVal @Name("full_like") Tensor torch_full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("full_like") Tensor torch_full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value); +@Namespace("torch") public static native @ByVal @Name("from_file") Tensor torch_from_file(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") 
public static native @ByVal @Name("from_file") Tensor torch_from_file(@ByVal @Cast("c10::string_view*") Pointer filename); +@Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length); +@Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length); +@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha); +@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta); +@Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length); +@Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); +@Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal 
@Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta); +@Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +@Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("ones_like") Tensor torch_ones_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("ones_like") Tensor torch_ones_like(@Const @ByRef Tensor self); +@Namespace("torch") public static native @ByVal @Name("scalar_tensor") Tensor torch_scalar_tensor(@Const @ByRef Scalar s, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("scalar_tensor") Tensor torch_scalar_tensor(@Const @ByRef Scalar s); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("rand_like") Tensor torch_rand_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("rand_like") Tensor torch_rand_like(@Const @ByRef Tensor self); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high); +@Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("randn_like") Tensor torch_randn_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("randn_like") Tensor torch_randn_like(@Const @ByRef Tensor self); +@Namespace("torch") public static native @ByVal @Name("randperm") Tensor torch_randperm(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("randperm") Tensor torch_randperm(@Cast("int64_t") long n); +@Namespace("torch") public static native @ByVal @Name("randperm") Tensor torch_randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); 
+@Namespace("torch") public static native @ByVal @Name("randperm") Tensor torch_randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator); +@Namespace("torch") public static native @ByVal @Name("range") Tensor torch_range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar step, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("range") Tensor torch_range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal LongArrayRef size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); +@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("zeros_like") Tensor torch_zeros_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("zeros_like") Tensor torch_zeros_like(@Const @ByRef Tensor self); +@Namespace("torch") public static native @ByVal @Name("sparse_compressed_tensor") Tensor torch_sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_compressed_tensor") Tensor torch_sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_csr_tensor") Tensor torch_sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_csr_tensor") Tensor torch_sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_csc_tensor") Tensor torch_sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_csc_tensor") Tensor torch_sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_bsr_tensor") Tensor torch_sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_bsr_tensor") Tensor torch_sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_bsc_tensor") Tensor torch_sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_bsc_tensor") Tensor torch_sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_compressed_tensor") Tensor torch_sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, 
@Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_csr_tensor") Tensor torch_sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_csc_tensor") Tensor torch_sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_bsr_tensor") Tensor torch_sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_bsc_tensor") Tensor torch_sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); +@Namespace("torch") public static native @ByVal Tensor _sparse_coo_tensor_unsafe_symint(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal SymIntArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor _sparse_coo_tensor_unsafe_symint(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal SymIntArrayRef size); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims") Tensor torch__sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal LongArrayRef size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims") Tensor torch__sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal LongArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal SymIntArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("_to_copy") Tensor torch__to_copy(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @Cast("bool") boolean non_blocking/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); +@Namespace("torch") public static native @ByVal @Name("_to_copy") Tensor torch__to_copy(@Const @ByRef Tensor self); +@Namespace("torch") public static native @ByVal @Name("tril_indices") Tensor torch_tril_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("tril_indices") Tensor torch_tril_indices(@Cast("int64_t") long row, @Cast("int64_t") long col); +@Namespace("torch") public static native @ByVal @Name("triu_indices") Tensor torch_triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("triu_indices") Tensor torch_triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col); +@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); 
+@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal LongArrayRef size); +@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); +@Namespace("torch") public static native @ByVal @Name("fft_fftfreq") Tensor torch_fft_fftfreq(@Cast("int64_t") long n, double d/*=1.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("fft_fftfreq") Tensor torch_fft_fftfreq(@Cast("int64_t") long n); +@Namespace("torch") public static native @ByVal @Name("fft_rfftfreq") Tensor torch_fft_rfftfreq(@Cast("int64_t") long n, double d/*=1.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); +@Namespace("torch") public static native @ByVal @Name("fft_rfftfreq") Tensor torch_fft_rfftfreq(@Cast("int64_t") long n); - // namespace cuda // namespace torch -// Parsed from torch/csrc/api/include/torch/ordered_dict.h +// Parsed from c10/core/PyHandleCache.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../StringTensorDict.java - - -// Targeting ../StringModuleDict.java - +// #include +// #include +// #include -// Targeting ../StringAnyModuleDict.java +// #include +// A PyHandleCache represents a cached pointer from a C++ object to +// a Python object that represents that object analogously in Python. +// Upon a cache hit, the relevant object can be retrieved after a test +// and then a memory load. Two conditions must hold to be able to use this +// class: +// +// - This must truly be a cache; e.g., the caller must be able to produce +// the object some other way if the cache hit misses. +// +// - This must truly be a handle; e.g., the Python object referenced by +// this class must have static lifetime. This means we don't have to +// maintain strong ownership or deallocate the object when the C++ object +// dies. Static lifetime is a good idea in conjunction with the cache, +// since if you are producing a fresh object on miss you won't be +// maintaining object identity. If you need bidirectional ownership, +// you will want to factor out the pattern in TensorImpl with +// resurrection. +// +// This cache is expected to not improve perf under torchdeploy, as one +// interpreter will fill up the cache, and all the interpreters will be +// unable to use the slot. A potential improvement is to have multiple +// slots (one per interpreter), which will work in deployment scenarios +// where there a stable, fixed number of interpreters. You can also store +// the relevant state in the Python library, rather than in the non-Python +// library (although in many cases, this is not convenient, as there may +// not be a way to conveniently index based on the object.) 
-// Targeting ../StringTensorDictItem.java
+// Parsed from c10/util/Bitset.h
+// #pragma once
-// Targeting ../StringModuleDictItem.java
+// #include
+// #include
+// #include
+// #if defined(_MSC_VER)
+// #endif
+// Targeting ../bitset.java
-// Targeting ../StringAnyModuleDictItem.java
+@Namespace("c10::utils") public static native @Cast("bool") @Name("operator !=") @NoException(true) boolean notEquals(@ByVal bitset lhs, @ByVal bitset rhs);
-// Targeting ../StringSharedModuleDictItem.java
+ // namespace utils
+ // namespace c10
+// Parsed from ATen/core/dispatch/DispatchKeyExtractor.h
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// #pragma once
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// Take a DispatchKeySet for a Tensor and determine what the actual dispatch
+// DispatchKey should be, taking into account TLS, and skipping backends which
+// fall through.
+//
+// Unlike Tensor::key_set(), the value of this on a tensor can change depending
+// on TLS.
+//
+// NB: If there is no valid dispatch key, this will return Undefined
+@Namespace("c10::impl") public static native @ByVal DispatchKeySet computeDispatchKeySet(
+      @ByVal DispatchKeySet ks,
+      @ByVal DispatchKeySet key_mask
+);
+  // A small gadget to extract the DispatchKeySet from types which are known
+  // to have it. Used to extract dispatch keys from unboxed calls.
+  // NB: take by const reference (Don't do universal forwarding here! You
+  // don't want to move into this function!)
+// Targeting ../DispatchKeyExtractor.java
+// Parsed from ATen/core/dispatch/OperatorEntry.h
+// #pragma once
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #ifdef C10_MOBILE
+// #endif
+// This data structure represents a kernel that was registered to us from a
+// user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata
+// about the kernel that isn't necessary for actual dispatching (this is why
+// we don't put AnnotatedKernel in the actual DispatchTable), but is useful for
+// giving good error messages.
+// This data structure represents operator schema, with metadata specifying
+// where the registration of this schema occurred
+// Internal data structure that records information about a specific operator.
+// It's not part of the public API; typically, users will interact with
+// OperatorHandle instead.
+//
+// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher
+// lock (this is important because some methods in OperatorEntry access
+// dispatcher state)
+ // namespace impl
+ // namespace c10
+// Parsed from c10/util/Synchronized.h
+// #pragma once
+// #include
+// #include
+/**
+ * A very simple Synchronization class for error-free use of data
+ * in a multi-threaded context. See folly/docs/Synchronized.md for
+ * the inspiration of this class.
+ *
+ * Full URL:
+ * https://github.com/facebook/folly/blob/main/folly/docs/Synchronized.md
+ *
+ * This class implements a small subset of the generic functionality
+ * implemented by folly::Synchronized. Specifically, only withLock
+ * is implemented here since it's the smallest possible API that is
+ * able to cover a large surface area of functionality offered by
+ * folly::Synchronized.
+ */
+ // end namespace c10
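The `withLock` idea restated in Java terms (an illustrative analog, not the bound class): all access to the guarded data goes through a callback that runs while the lock is held, so the data can never be touched unlocked.

// Hedged sketch of a withLock-style wrapper.
import java.util.function.Function;

final class Synchronized<T> {
    private final Object mutex = new Object();
    private final T data;

    Synchronized(T data) { this.data = data; }

    <R> R withLock(Function<T, R> fn) {
        synchronized (mutex) {
            return fn.apply(data);   // data is only reachable under the lock
        }
    }
}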
+// Parsed from ATen/core/dispatch/Dispatcher.h
+// #pragma once
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+@Namespace("c10") public static native @Cast("bool") boolean show_dispatch_trace();
+@Namespace("c10") public static native void dispatch_trace_nesting_incr();
+@Namespace("c10") public static native void dispatch_trace_nesting_decr();
+@Namespace("c10") public static native @Cast("int64_t") long dispatch_trace_nesting_value();
+// Targeting ../OpRegistrationListener.java
+// Targeting ../RegistrationListenerList.java
+// Targeting ../SchemaRegistrationHandleRAII.java
+// Targeting ../Dispatcher.java
+// Targeting ../OperatorHandle.java
+/**
+ * This is a handle to an operator schema registered with the dispatcher.
+ * It holds the same information as an OperatorHandle, but it is templated
+ * on the operator arguments and allows calling the operator in an
+ * unboxed way.
+ */
+// CaptureKernelCall is intended to capture return values from Dispatcher
+// unboxed kernel calls. A record function may request to get outputs from the
+// kernel calls. For boxed kernels, it's straightforward, the returned values
+// are in the stack object. The stack can be passed to record functions. For
+// unboxed kernels, we need to handle different kinds of return values, cache
+// them temporarily, then release the values for the actual function call
+// return.
+// Handle the lvalue reference differently since it should not be moved.
+// Handle case where the kernel returns void.
+ // namespace detail
+// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
+// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
+// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
+// NB: this doesn't count as a "true" dispatcher jump, so no instrumentation
+ // namespace c10
+ // namespace std
+// Parsed from torch/types.h
- // namespace torch
+// #pragma once
+// #include
-// Parsed from torch/csrc/utils/memory.h
+// #include
-// #pragma once
+// #include
+// #include
-// #include
+// TODO: These don't really belong here but torchvision builds in CI need them
+// Remove once the torchvision version being compiled in CI is updated
+// #include
+// #include
-// Reference:
-// https://github.com/llvm-mirror/libcxx/blob/master/include/memory#L3091
+// NOTE [ Exposing declarations in `at::` to `torch::` ]
+//
+// The following line `using namespace at;` is responsible for exposing all
+// declarations in `at::` namespace to `torch::` namespace.
+//
+// According to the rules laid out in
+// https://en.cppreference.com/w/cpp/language/qualified_lookup, section
+// "Namespace members":
+// ```
+// Qualified lookup within the scope of a namespace N first considers all
+// declarations that are located in N and all declarations that are located in
+// the inline namespace members of N (and, transitively, in their inline
+// namespace members). If there are no declarations in that set then it
+// considers declarations in all namespaces named by using-directives found in N
+// and in all transitive inline namespace members of N.
+// ```
+//
+// This means that if both `at::` and `torch::` namespaces have a function with
+// the same signature (e.g. both `at::func()` and `torch::func()` exist), after
+// `namespace torch { using namespace at; }`, when we call `torch::func()`, the
+// `func()` function defined in `torch::` namespace will always be called, and
+// the `func()` function defined in `at::` namespace is always hidden.
 // NOLINT
+/** Fixed width dtypes. */
+/** Rust-style short dtypes. */
 // namespace torch
-// Parsed from torch/csrc/utils/python_stub.h
+// Parsed from torch/data/dataloader_options.h
 // #pragma once
-// Targeting ../_object.java
+// #include
+// #include
+// #include
+// #include
+// Targeting ../DataLoaderOptions.java
-// Parsed from torch/csrc/utils/schema_info.h
-// #pragma once
-// #include
-// #include
-// Targeting ../SchemaInfo.java
- // namespace utils
+/** Like {@code DataLoaderOptions}, but without any unconfigured state.
+ *  {@code DataLoaderOptions} has some options that depend on other options
+ *  ({@code max_jobs} => {@code 2 * workers}). In the spirit of properly using the C++ type
+ *  system, {@code DataLoaderOptions} allows only setting values. To access values,
+ *  you must create a {@code FullDataLoaderOptions} from a {@code DataLoaderOptions}
+ *  instance, which will do any necessary coalescing. */
+ // namespace data
 // namespace torch
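A minimal sketch of that two-type split, assuming the generated classes mirror the C++ constructors (the `FullDataLoaderOptions` binding and its one-argument constructor are assumptions here):

// Hedged sketch: DataLoaderOptions records only what was set; FullDataLoaderOptions
// coalesces everything left unset, e.g. an unset max_jobs becoming 2 * workers.
DataLoaderOptions options = new DataLoaderOptions(/*batch_size=*/64);
FullDataLoaderOptions full = new FullDataLoaderOptions(options);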
-// Parsed from torch/csrc/utils/variadic.h
+// Parsed from torch/data/detail/queue.h
 // #pragma once
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// Targeting ../Indices.java
-// Decrements the index N, adds N-1 to the list of indices and forwards
-// whatever we already have.
-// Targeting ../MakeIndices.java
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
-//===----------------------------------------------------------------------===//
-// Utilities
-//===----------------------------------------------------------------------===//
-// Targeting ../pack.java
+/** A basic locked, blocking MPMC queue.
+ *
+ *  Every {@code push} and {@code pop} is guarded by a mutex. A condition variable is used
+ *  to communicate insertion of new elements, such that waiting threads will be
+ *  woken up if they are currently waiting inside a call to {@code pop()}.
+ *
+ *  Note that this data structure is written specifically for use with the
+ *  {@code DataLoader}. Its behavior is tailored to this use case and may not be
+ *  applicable to more general uses. */
-// Targeting ../all_of.java
+ // namespace detail
+ // namespace data
+ // namespace torch
-// Targeting ../any_of.java
+// Parsed from torch/data/detail/data_shuttle.h
+// #pragma once
+// #include
+// #include
-// #include
+// #include
+// #include
+// #include
+// #include
+/** Encapsulates the full life cycle of DataLoader jobs.
+ *
+ *  When a new job is enqueued to the {@code DataShuttle}, a counter for in-flight
+ *  jobs is bumped. This job is said to be "in-flight" until its result is
+ *  popped. Worker threads dequeue jobs as soon as they are available. When a
+ *  worker finishes a job, it enqueues the result. Only when the main thread
+ *  dequeues a result is the count of in-flight jobs decremented. When the main
+ *  thread attempts to dequeue a job but no jobs are in-flight, that means the
+ *  epoch is complete and {@code pop_result} returns an empty optional. */
+ // namespace detail
+ // namespace data
+ // namespace torch
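An illustrative Java analog of that queue (not the bound class): one mutex guards the deque, and a condition variable wakes threads blocked in pop() when an element arrives.

// Hedged sketch of a locked, blocking MPMC queue.
import java.util.ArrayDeque;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

final class BlockingQueue<T> {
    private final ArrayDeque<T> buffer = new ArrayDeque<>();
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition nonEmpty = lock.newCondition();

    void push(T value) {
        lock.lock();
        try {
            buffer.addLast(value);
            nonEmpty.signal();            // wake one thread waiting in pop()
        } finally {
            lock.unlock();
        }
    }

    T pop() throws InterruptedException {
        lock.lock();
        try {
            while (buffer.isEmpty()) {
                nonEmpty.await();         // blocks until push() signals
            }
            return buffer.removeFirst();
        } finally {
            lock.unlock();
        }
    }
}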
-// Parsed from torch/csrc/autograd/utils/warnings.h
+// Parsed from torch/data/detail/sequencers.h
 // #pragma once
-// #include
-// #include
-// #include
+// #include
-// Warning handler for multi-threaded contexts. Gather warnings from
-// all threads into a single queue, then process together at the end
-// in the main thread.
+// #include
+// #include
+// #include
+ // namespace detail
- // namespace utils
- // namespace autograd
+/** A {@code Sequencer} accepts a function that yields the next result of a
+ *  {@code DataLoader} and then has the opportunity to influence the order in which
+ *  these results are returned. The {@code NoSequencer} does not enforce any
+ *  sequencing and returns any result directly. The {@code OrderedSequencer} instead
+ *  buffers results internally to return them in order of their sequence number. */
+
+/** A {@code Sequencer} that does not enforce any ordering. It is effectively the
+ *  identity function. */
+
+/** A {@code Sequencer} that buffers results and returns them in order of their
+ *  sequence number. The {@code OrderedSequencer} maintains an internal, monotonically
+ *  incrementing counter for the next sequence number it expects. If it receives
+ *  a result with a higher sequence number, it will buffer it for later (when
+ *  the sequence number reaches that of this result). Otherwise, if the sequence
+ *  numbers match, the result is returned.
+ *
+ *  Implementation note: The {@code OrderedSequencer} is implemented with a fixed-size
+ *  buffer. Let {@code m} be the maximum number of jobs in the data loader's queue and
+ *  {@code s} be the current sequence number. Assume {@code m} jobs are scheduled in the
+ *  {@code DataLoader}. Any new result is stored at index {@code job.sqn mod m} in the
+ *  {@code OrderedSequencer}. Why are we sure sequence numbers of new jobs will not
+ *  collide with sequence numbers of buffered jobs? The {@code OrderedSequencer} will
+ *  not return from {@code next()} until it receives the result with sqn {@code s}. This
+ *  means no new jobs can be scheduled in the {@code DataLoader} in the meantime,
+ *  which enforces that as long as sqn {@code s} has not been received, {@code s + m} (which
+ *  would cause a collision in the fixed-size buffer) will not yet be scheduled. */
+ // namespace sequencers
+ // namespace detail
+ // namespace data
 // namespace torch
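The fixed-size-buffer argument is easier to see in code. An illustrative analog (not the bound class): with at most m results in flight, slot sqn % m is always free when a result arrives.

// Hedged sketch of an ordered sequencer; results are assumed non-null.
final class OrderedSequencer<T> {
    static final class SequencedResult<T> {
        final long sqn; final T value;
        SequencedResult(long sqn, T value) { this.sqn = sqn; this.value = value; }
    }

    private final Object[] buffer;   // slot i holds the parked result with sqn % m == i
    private long next = 0;           // the sequence number expected next

    OrderedSequencer(int maxJobsInFlight) { buffer = new Object[maxJobsInFlight]; }

    @SuppressWarnings("unchecked")
    T next(java.util.function.Supplier<SequencedResult<T>> pull) {
        int m = buffer.length;
        while (buffer[(int) (next % m)] == null) {
            SequencedResult<T> r = pull.get();   // arrives in completion order
            buffer[(int) (r.sqn % m)] = r.value; // park it; slot is free by the argument above
        }
        T out = (T) buffer[(int) (next % m)];
        buffer[(int) (next % m)] = null;
        next++;
        return out;
    }
}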
-// Parsed from torch/csrc/autograd/anomaly_mode.h
+// Parsed from torch/data/iterator.h
 // #pragma once
-// #include
-// #include
-// #include
-// Targeting ../AnomalyMode.java
-// Targeting ../DetectAnomalyGuard.java
-// Targeting ../AnomalyMetadata.java
- // namespace autograd
- // namespace torch
-// Parsed from torch/csrc/autograd/edge.h
+// #include
+// #include
-// #pragma once
+// #include
-// #include
 // #include
+// #include
 // #include
+// #include
+// #include
+// For increased safety and more separated logic, this implementation of
+// `Iterator` consists of a `ValidIterator` and a `SentinelIterator`. A
+// `ValidIterator` yields new batches until the `DataLoader` is exhausted. While
+// the `DataLoader` is not exhausted, `ValidIterator`s compare equal if they are
+// the same object. When the `ValidIterator` becomes exhausted, it compares
+// equal to the `SentinelIterator`, but not before. Half the code here is to
+// implement double dispatch for the comparison. Got damnit, C++.
-// #include
-// Targeting ../Edge.java
- // namespace autograd
- // namespace torch
-// The idiomatic way of enabling use of a custom type as the key of hash
-// containers in C++11. This method removes the requirement of having to pass
-// a custom hasher to std::unordered_{map, set}.
-// See http://en.cppreference.com/w/cpp/utility/hash for more information.
- // namespace std
+/** Base class for the {@code ValidIterator} and {@code SentinelIterator} */
+// Targeting ../ExampleIterator.java
-// Parsed from torch/csrc/autograd/grad_mode.h
-// #pragma once
+// Targeting ../ExampleVectorOptionalIterator.java
-// #include
-// #include
- // namespace autograd
+ // namespace data
 // namespace torch
-// Parsed from torch/csrc/autograd/InferenceMode.h
+// Parsed from torch/data/samplers/base.h
 // #pragma once
-// #include
 // #include
+// #include
-
+// #include
+// #include
+// #include
+ // namespace serialize
 // namespace torch
+// Targeting ../Sampler.java
-// Parsed from torch/csrc/autograd/input_metadata.h
-// #pragma once
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #ifndef AT_PER_OPERATOR_HEADERS
-// #include
-// #else
-// #include
-// #endif
-// #include
-// #include
-// Targeting ../InputMetadata.java
+// Targeting ../BatchSizeSampler.java
- // namespace autograd
+ // namespace samplers
+ // namespace data
 // namespace torch
-// Parsed from torch/csrc/autograd/function_hook.h
+// Parsed from torch/data/samplers/random.h
 // #pragma once
-// #include
 // #include
-// #include
+// #include
+// #include
+// #include
+// #include
-// A hook that's called on gradients
-// Targeting ../FunctionPreHook.java
-// Targeting ../FunctionPostHook.java
+ // namespace serialize
+ // namespace torch
+// Targeting ../RandomSampler.java
- // namespace autograd
+ // namespace samplers
+ // namespace data
 // namespace torch
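A minimal usage sketch for the bound sampler, assuming the generated API mirrors the C++ one (a no-arg `reset()` overload, and a `next(batch_size)` that returns an empty optional once the epoch is exhausted; `SizeTVectorOptional` as the wrapper type is an assumption):

// Hedged sketch: draw shuffled index batches for one epoch.
RandomSampler sampler = new RandomSampler(/*size=*/100);
sampler.reset();
for (SizeTVectorOptional batch = sampler.next(10); batch.has_value(); batch = sampler.next(10)) {
    // batch.get() holds up to 10 shuffled dataset indices
}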
-// Parsed from torch/csrc/autograd/profiler.h
+// Parsed from torch/data/worker_exception.h
 // #pragma once
-// #include
-// #include
+// #include
+// #include
+// #include
+/** An exception thrown when a DataLoader's worker thread throws an exception,
+ *  which is caught. A {@code WorkerException} stores an {@code exception_ptr} to the
+ *  original exception thrown in the worker thread. */
-// Parsed from torch/csrc/autograd/saved_variable_hooks.h
+ // namespace data
+ // namespace torch
+// Parsed from torch/csrc/utils/memory.h
 // #pragma once
-// #include
-// Targeting ../SavedVariableHooks.java
+// #include
+// Reference:
+// https://github.com/llvm-mirror/libcxx/blob/master/include/memory#L3091
- // namespace autograd
 // namespace torch
-// Parsed from torch/csrc/autograd/saved_variable.h
+// Parsed from torch/data/dataloader/base.h
 // #pragma once
-// #include
-// #include
-// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
-// #include
+// #include
+// #include
-// #include
+// #include
+// #include
+// #include
+// #include
 // #include
+// #include
+// #include
+// #include
+// #include
+// Targeting ../ChunkRandomDataLoaderBase.java
-@Namespace("torch::autograd") public static native @Cast("const char*") BytePointer ERR_BACKWARD_TWICE(); public static native void ERR_BACKWARD_TWICE(BytePointer setter);
-// Targeting ../SavedVariable.java
+// Targeting ../MNISTRandomDataLoaderBase.java
- // namespace autograd
+ // namespace data
 // namespace torch
-// Parsed from torch/csrc/autograd/forward_grad.h
+// Parsed from torch/data/dataloader/stateful.h
 // #pragma once
-// #include
-// [ Using ForwardGrad ]
-// ForwardGrad needs to be a shared_ptr to satisfy constraints of its inner
-// design. But this shared_ptr must be uniquely associated with the object that
-// stores it (as of writing, either AutogradMeta or SavedVariable). This object
-// is called the "owning object" in the discussions below. This owning object
-// must call `ForwardGrad::clear()` when it is destroyed to ensure that the
-// ForwardGrad is properly de-allocated.
-// This file contains two classes that are used to store forward AD gradients
-// and ensure that they are scoped properly. Because forward AD runs
-// concurrently with the evaluation of the function, we need a mechanism to
-// separate different forward AD invocations and be able to compute the right
-// gradients. We model such invocations as levels here. The particular scoping
-// issue mentioned above has two main drivers:
-// - Ensure that we can conveniently use forward AD within a high level API
-// without
-// leaking the forward AD states outside.
-// - Ensure that we can keep the level that we expose to the user API simple
-// (an integer
-// that represents the nesting depth) while avoiding confusions when the
-// level index is re-used.
-// The important external APIs from this file are:
-// - ForwardADLevel::get_next_idx() that can be used to enter a new level and
-// get its index
-// - ForwardADLevel::release_idx() that can be used to exit a given level.
-// - ForwardGrad() can be used to store a given forward gradient that will
-// handle the level
-// tracking automatically.
-// The basic implementation strategy is as follows:
-// Every tensor has a ForwardGrad, maintaining a map from levels to tangents.
-// ForwardGrad is responsible for registering itself to the appropriate
-// ForwardADLevel when a new tangent is added to it via ForwardGrad::set_value
-// and to un-register itself from this same level if that tangent is removed via
-// ForwardGrad::reset. The ForwardADLevel is created when a new level is entered
-// via ForwardADLevel::get_next_idx. A reference to the new ForwardADLevel is
-// stored into a global (for the whole process) vector that ensures it can be
This reference is deleted when the
-// index is released by the user when calling ForwardADLevel::release_idx. When
-// it is destructed, the ForwardADLevel is responsible for clearing all the
-// tangents for its level stored in all the ForwardGrad that registered with it.
-//
-// This process-wide level design, compared to a thread-local one, allows us to
-// use a very simple user-facing handle for the level (an int) while enabling
-// cross-thread forward AD. The only required synchronization for the user is
-// when entering and exiting the levels. Some discussion of alternative designs
-// is in https://github.com/pytorch/pytorch/pull/49097#discussion_r543716453 and
-// can be refined in the future.
+// #include
+// #include

-// Correctness of concurrency:
-// Each class uses its own lock when reading or modifying internal storages.
-// In particular, this allows tangents to be safely removed from ForwardGrad when
-// the ForwardADLevel is being exited. We ensure no deadlock by ensuring that a
-// method never calls into another class's method while the local class's lock
-// is held, except in one single case: calling from ForwardADLevel's destructor
-// into ForwardGrad::reset with update_level=false.
+// #include
+// #include
+// #include
+// Targeting ../ChunkRandomDataLoader.java


-// The lifetime of these objects is as follows:
-// The ForwardADLevel can be in three states:
-//     - Initialized: where one of its references is held by the global vector
-//     and there may be more
-//       references held by temporary variables in ForwardGrad's methods.
-//     - About to be destructed: where "release_idx" has been called and the
-//     only reason for the
-//       ForwardADLevel not to be destructed right away is that some methods in
-//       ForwardGrad have an owning reference to it. This is done so that a
-//       ForwardADLevel can never be destructed when a ForwardGrad is
-//       registered with it and in the process of adding something to its
-//       internal state.
-//     - Being destructed: Here the ForwardADLevel is not referenced anymore
-//     and can safely reset
-//       all of the ForwardGrad. Note that we can have more than one reset
-//       being called here (which is ok) but we are guaranteed that there is at
-//       least one.
-// The ForwardGrad is simpler as there is no intermediary state and no special
-// destructor for it. The logic to unregister it from the different ForwardADLevel
-// is done when the owning object (AutogradMeta or SavedVariable) is being
-// destroyed.
-// Other designs considered:
-// To avoid having ForwardGrad::clear, we considered storing weak_ptr inside
-// the ForwardADLevel. While this would work, it would mean that the set inside
-// the ForwardADLevel would only grow unless we do an expensive linear scan to
-// remove all the dangling weak pointers. Hence this approach was not used.

+ // namespace data
+ // namespace torch

-// Data structures in this file are optimized for this maximum number of levels.
-// The number of levels corresponds to the degree of the gradient being
-// computed using forward AD and we don't expect more than second-order
-// gradients to be common.
-public static final int EXPECTED_MAX_LEVEL = 2; -// Targeting ../ForwardADLevel.java +// Parsed from torch/data/dataloader/stateless.h + +// #pragma once -// Targeting ../ForwardGrad.java +// #include +// #include +// #include +// #include +// #include - // namespace autograd +// #include +// #include +// #include +// Targeting ../MNISTRandomDataLoader.java + + + // namespace data // namespace torch -// Parsed from torch/csrc/autograd/variable.h +// Parsed from torch/data/dataloader.h // #pragma once -// #include +// #include +// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include -// #include -// #include // #include -// #include +// #include // #include -// #include -// #include -// #include +// #include // #include -// #include -/** {@code Variable} is exactly the same as {@code Tensor} (i.e. we have {@code using Variable = - * at::Tensor}). This means you can perform all the usual mathematical and - * other operations you can perform on {@code Tensor}s also on {@code Variable}s. - * - * The only reason we are keeping the {@code Variable} class is backward - * compatibility with external user's legacy C++ frontend code. Our intention - * is to eliminate the {@code Variable} class in the near future. */ +/** Creates a {@code DataLoader} instance for a stateless {@code dataset}, a {@code sampler} and + * some {@code options}. */ - // namespace autograd +/** Creates a {@code DataLoader} instance for a stateless {@code dataset} and some + * {@code options}. A sampler (by default a {@code RandomSampler}) will be constructed from + * the size of the dataset. */ + +/** Creates a {@code DataLoader} for a stateful {@code dataset} and some {@code options}. */ + // namespace data // namespace torch -// The following are all internal APIs and should not be shown in libtorch docs. -// Therefore, we wrap the following code with `#ifndef DOXYGEN_SHOULD_SKIP_THIS -// ... #endif` -// #ifndef DOXYGEN_SHOULD_SKIP_THIS +// Parsed from torch/data/example.h -/** Check if this type is supported by the autograd engine. - * If you change this, update the doc at the top of the - * torch/autograd/__init__.py file and - * "test_set_requires_grad_only_for_continuous_types" in test/test_autograd.py */ -@Namespace("torch::autograd") public static native @Cast("bool") boolean isDifferentiableType(ScalarType t); +// #pragma once -/**~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * Variable - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * A {@code Variable} augments a {@code Tensor} with the ability to interact in our - * autograd machinery. Conceptually, {@code Variable}s travel along {@code Edge}s between - * {@code Node}s in the autograd graph. A {@code Variable} can either be a leaf, like a - * weight in a neural network, or an interior variable, when it is the result - * of an operation between variables. Every {@code Variable} also stores another - * {@code Variable} called its {@code grad} (gradient). If the variable is a leaf, its - * gradient will be accumulated into this variable. - * - * Every Tensor is a Variable, but sometimes we colloquially refer to Variables - * that don't require gradients as Tensors (since none of the autograd - * machinery for Variables applies). Historically, Variables and Tensors - * were separate concepts, but now they are exactly the same (i.e. we have - * {@code using Variable = at::Tensor}). 
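As a concrete illustration of this equivalence on the Java side of these bindings, the round trip below is a minimal sketch. It assumes the generated signatures that appear elsewhere in this file ({@code requires_grad_}, {@code backward()}, the {@code grad(TensorVector, TensorVector)} overload, the {@code TensorVector} varargs constructor) plus {@code Tensor.print()}, assumed here from {@code at::TensorBase::print()}; none of this is verified against the regenerated sources:

import org.bytedeco.pytorch.Tensor;
import org.bytedeco.pytorch.TensorVector;
import static org.bytedeco.pytorch.global.torch.*;

public class VariableSketch {
    public static void main(String[] args) {
        Tensor x = ones(2, 2);       // a leaf "Variable" is just a Tensor
        x.requires_grad_(true);
        Tensor y = x.mul(x).sum();   // an interior Variable, with a grad_fn
        y.backward();                // accumulates into x.grad()
        x.grad().print();            // d sum(x*x) / dx == 2*x

        // Functional form: returns gradients instead of accumulating them.
        Tensor y2 = x.mul(x).sum();  // rebuild the graph; backward() freed it
        TensorVector dx = grad(new TensorVector(y2), new TensorVector(x));
        dx.get(0).print();
    }
}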
- * - * Gradient Edges - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * Furthermore, {@code Variable}s have the notion of a {@code gradient_edge}, which is the - * edge in the autograd graph that connects the variable to a particular input - * of the gradient function that will be invoked with the variable during the - * backward pass. More precisely, this gradient function can be one of two - * things: - * 1. A {@code grad_fn}, if the variable is in the interior of the graph. This is the - * gradient of the function that produced the variable. - * 2. A {@code grad_accumulator}, if the variable is a leaf, which accumulates a - * scalar gradient value into its {@code grad} variable. - * - * Versioning - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * Another major feature of {@code Variable}s are *versions*. Versions are - * incremented when an in-place mutation of a variable occurs. Versions are - * useful when constructing {@code SavedVariable}s, which take a snapshot of a - * {@code Variable} at a certain version. You can retrieve a {@code Variable}'s version - * through its {@code current_version()} method. +// #include +// Targeting ../Example.java + + +// Targeting ../TensorExample.java + + +// Targeting ../NoTarget.java + + + // namespace example + +/** A specialization for {@code Example} that does not have a target. * - * Views - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * It is possible for a {@code Variable} to be a *view* of another {@code Variable}, in - * which case it tracks that {@code Variable}'s data and autograd history. Beyond - * construction, the interface of a view is identical to that of a regular - * {@code Variable}. You can determine whether {@code Variable} is in fact a view by - * probing its {@code is_view()} method. Note that the *view* semantics are only - * meaningful for {@code Variable} relations that are relevant to autograd. - * See NOTE [ Autograd View Variables ] for more details. - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ + * This class exists so that code can be written for a templated {@code Example} + * type, and work both for labeled and unlabeled datasets. */ + // namespace data + // namespace torch -// Private-ish functions for manipulating variables; we don't want to put them -// on Tensor proper -// WARNING: This may return a nullptr. If you require AutogradMeta to return -// a materialized structure, use materialize_autograd_meta instead. -@Namespace("torch::autograd::impl") public static native AutogradMeta get_autograd_meta(@Const @ByRef TensorBase arg0); +// Parsed from torch/data/datasets/base.h -// WARNING: This will return a nullptr if the Tensor is not a view. -@Namespace("torch::autograd::impl") public static native DifferentiableViewMeta get_view_autograd_meta(@Const @ByRef TensorBase arg0); +// #pragma once -// Returns the current autograd meta, materializing it if it was previously -// none. This counts as a *mutating* operation, so do not call it on -// "read-only" operators; in particular, this is NOT thread safe -@Namespace("torch::autograd::impl") public static native AutogradMeta materialize_autograd_meta(@Const @ByRef TensorBase arg0); +// #include +// #include -/** Set the gradient accumulator of the {@code Variable}. This is only applicable to - * leaf variables. Interior variables should call {@code set_gradient_edge()}. 
*/ +// #include -/** Attempts to get a pointer to the gradient accumulator of the {@code Variable}, - * if it still exists. If the gradient accumulator function has been - * destroyed, returns a {@code nullptr}. */ -@Namespace("torch::autograd::impl") public static native @SharedPtr Node try_get_grad_accumulator(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0); +// #include +// #include +// #include +// #include +// #include // NOLINT + // namespace datasets + // namespace data + // namespace torch -/** Gets the gradient accumulator of the {@code Variable} if it has one, or else - * create one on the fly and return it. */ -@Namespace("torch::autograd::impl") public static native @SharedPtr Node grad_accumulator(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0); +// Targeting ../ChunkBatchDataset.java -/** Returns the "canonical" gradient edge of this {@code Variable}, i.e. either the - * gradient function if this is an interior {@code Variable}, or the gradient - * accumulator otherwise. If the {@code Variable} is interior, the returned {@code Edge} - * will store the input index of the {@code Node} to which this variable is - * connected in its {@code input_nr} field. For leaves, the {@code input_nr} is always - * zero. Note that {@code set_gradient_edge} and {@code gradient_edge} are not - * symmetric. You must use {@code set_gradient_edge} to set the {@code grad_fn} and - * {@code set_grad_accumulator} to set the accumulator. */ -@Namespace("torch::autograd::impl") public static native @ByVal Edge gradient_edge(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0); -/** Set the gradient edge -- i.e. {@code grad_fn} and {@code input_nr} -- of the - * {@code Variable}. - * NOTE: This will always set the {@code grad_fn}, even if this is a leaf variable, - * and never the {@code grad_accumulator}. For the latter, use - * {@code set_grad_accumulator}. This allows late construction of an interior - * {@code Variable}. */ +// Targeting ../ChunkBatchSharedBatchDataset.java -/// -@Namespace("torch::autograd::impl") public static native void set_gradient_edge(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0, @ByVal Edge edge); -// Autograd Graph Interaction -//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Targeting ../ChunkMapBatchDataset.java -/** Update the {@code grad_fn} of an existing Variable. Called after in-place - * modifications. - * - * For View Variables: - * Called after in-place modifications. Modifies the grad_fn of the base - * Variable. */ -@Namespace("torch::autograd::impl") public static native void rebase_history(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0, @ByVal Edge gradient_edge); -/** Gets the raw gradient function pointer, whatever it currently is. */ -@Namespace("torch::autograd::impl") public static native Node grad_fn_unsafe(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0); +// Targeting ../MNISTBatchDataset.java -/** Increments the version count of this {@code Variable}. */ -@Namespace("torch::autograd::impl") public static native void bump_version(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0); -@Namespace("torch::autograd::impl") public static native void set_version_counter( - @Cast("const torch::autograd::Variable*") @ByRef Tensor arg0, - @Const @ByRef VariableVersion version_counter); -/** Retrieves this {@code Variable}s version counter. 
*/
-@Namespace("torch::autograd::impl") public static native @Const @ByRef VariableVersion version_counter(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0);
+// Targeting ../MNISTMapBatchDataset.java


-@Namespace("torch::autograd::impl") public static native void set_name(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0, @StdString BytePointer name);
-@Namespace("torch::autograd::impl") public static native void set_name(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0, @StdString String name);
-@Namespace("torch::autograd::impl") public static native void add_hook(
-    @Const @ByRef TensorBase arg0,
-    @UniquePtr @Cast({"", "std::unique_ptr&&"}) FunctionPreHook hook);
-@Namespace("torch::autograd::impl") public static native @ByRef FunctionPreHookVector hooks(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0);
-@Namespace("torch::autograd::impl") public static native void clear_hooks(@Const @ByRef TensorBase arg0);
+// Targeting ../TensorExampleBatchDataset.java


-@Namespace("torch::autograd::impl") public static native void create_cpp_hook(
-    @Const @ByRef TensorBase arg0,
-    @Cast("bool") boolean is_retains_grad_hooks/*=false*/);
-@Namespace("torch::autograd::impl") public static native void create_cpp_hook(
-    @Const @ByRef TensorBase arg0);
-// Targeting ../AutogradMeta.java
+// Targeting ../MNISTDataset.java


+// Targeting ../TensorExampleDataset.java


-//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-// DifferentiableViewMeta
-//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-/** NOTE [ Autograd View Variables ]
- *
- * Many operations return a Variable that shares storage with an input Variable.
- * The returned Variable is called a **view** Variable on the input **base**
- * Variable.
- *
- * In PyTorch, we have two types of views: differentiable views, and
- * non-differentiable views. In either type, to support proper version
- * checking, the base and view Variables must always share the same
- * version_counter.
- *
- *
- * Differentiable Views
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This class allows tracking both forward and backward AD differentiable
- * views. These views can have different bases, as the non-differentiable views
- * for forward and backward mode AD are not the same.
- *
- * Most functions are either both forward and backward differentiable views (for
- * example: view, select, narrow, transpose, etc.) or neither forward nor
- * backward differentiable views (for example: indices, values, eq, lt, etc.).
- * But there are also functions that are forward but not backward
- * differentiable views (only detach for now) or functions that are backward
- * but not forward differentiable views (only make_dual and unpack_dual for
- * now).
- *
- * A concrete example of two views with different bases is as follows:
- *
- *     # Have:
- *     #   dual is a dual Tensor that is neither a forward nor a backward view
- *     detached_dual = dual.detach()
- *     view = detached_dual.view_as(dual)
- *     # The forward base of view is dual
- *     # The backward base of view is detached_dual
- *
- * - Backward Mode View
- * Differentiable views are the view variables where you want gradients to flow
- * back to the base variables. Out-of-place operations on views are quite
- * straightforward, but in-place ones are very tricky.
Even if the base
- * variable may not require grad when we create the view, we still need to
- * track the view relation because future in-place ops may require
- * back-propagating through it. For example, we need to support
- *
- *   (1) in-place operation on view, e.g.,
- *
- *     # Have:
- *     #   base.requires_grad = False
- *     #   var.requires_grad = True
- *     base[1] = var  # i.e., base[1].copy_(var)
- *     torch.autograd.grad(base.sum(), var)  <- should return an all ones
- *     tensor
- *
- *   (2) in-place operation on base after view is created, e.g.,
- *
- *     # Have:
- *     #   base.requires_grad = False
- *     #   var.requires_grad = True
- *     view = base[1]
- *     base.copy_(var)
- *     torch.autograd.grad(view.sum(), var)  <- should return a tensor with
- *                                              var[1] filled with all ones and
- *                                              zeros everywhere else
- *
- * - Forward Mode View
- * Forward differentiable views follow the same semantics as backward ones but
- * show up differently as they are computed along with the forward evaluation.
- * The hard examples above are thus very similar:
- *
- *   (1) in-place operation on view, e.g.,
- *
- *     # Have:
- *     #   base is a regular Tensor
- *     #   var is a dual Tensor whose tangent is all ones
- *     base[1] = var  # i.e., base[1].copy_(var)
- *     # Now, base is a dual Tensor
- *     _, fw_grad = fwAD.unpack_dual(base)  <- fw_grad should be a tensor with
- *                                             fw_grad[1] filled with all ones
- *                                             and zeros everywhere else
- *
- *   (2) in-place operation on base after view is created, e.g.,
- *
- *     # Have:
- *     #   base is a regular Tensor
- *     #   var is a dual Tensor whose tangent is all ones
- *     view = base[1]
- *     base.copy_(var)
- *     _, fw_grad = fwAD.unpack_dual(view)  <- fw_grad should be an all ones
- *     tensor
- *
- * See Note [Forward Grad View/inplace] for more details on how we handle these
- * hard cases.
- *
- *
- * DifferentiableViewMeta is created to support gradient tracking of
- * such **in-place** operations. In particular,
- *   + if an in-place op is done on base, the grad_fn field of the view may
- *     become stale. So accesses should always go through grad_fn(), which
- *     reconstructs an updated grad_fn if the version_counter has incremented.
- *     All other fields are always valid.
- *   + if an in-place op is done on view, in rebase_history() of view, which is
- *     called after every in-place op in VariableType.cpp, the grad_fn of base
- *     is updated.
- *   + if a single autograd Node returns multiple differentiable views, if any
- *     output is modified by an inplace operation, the autograd engine will
- *     make an equivalent graph (corresponding to the view operations) without
- *     using equivalent graph, where each output is treated as if it were
- *     produced by a distinct view operation. This discards the original (e.g.,
- *     user provided) grad_fn. If the provided grad_fn does more than the
- *     backward of the view, then the DifferentiableViewMeta must be created
- *     with creation_meta=CreationMeta::MULTI_OUTPUT_NODE to prevent the
- *     engine from ignoring the provided grad_fn.
- *
- * Interaction with GradMode:
- * The particular case that we consider here is:
- *
- *   # Have:
- *   #   base.requires_grad = True or False
- *   with torch.no_grad():
- *       view = base[1]
- *   base.requires_grad_()
- *   view.copy_(var)
- *   torch.autograd.grad(base.sum(), var)  <- what should it return?
- *
- * Given that this particular code example is ambiguous and can easily be
- * replaced by either moving both inside the no_grad block or both outside, we
- * explicitly forbid it. For now, it is deprecated by a warning.
This is
- * achieved by setting creation_meta=CreationMeta::NO_GRAD_MODE for all
- * differentiable views created in no_grad mode.
- *
- * See Note [View + Inplace update for base tensor]
- * and Note [View + Inplace update for view tensor] for the details of how
- * autograd handles inplace updates with view ops.
- *
- * Non-Differentiable Views
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * In certain cases, although function outputs share storage with inputs, they
- * will **never** require gradient history tracking. Instead of registering the
- * view relation via DifferentiableViewMeta in autograd, the views will use the
- * usual AutogradMeta and just share the version counters with the base
- * Variables.
- * Such views include:
- *   1. Views created from .detach()
- *   2. Views that are non-differentiable by their nature.
- *      E.g., {@code sparse_tensor.indices()} is an integral view on a (possibly)
- *      floating point tensor.
- *      See the top of {@code derivatives.yaml} on how to specify that outputs of a
- *      function are non-differentiable.
- * These are called non-differentiable views as the gradients do not flow
- * through the view relation.
- *
- * Relevant logic for both differentiable and non-differentiable views is
- * implemented in make_variable_(non_)differentiable_view below, and
- * wrap_output of gen_variable_type.py.
-
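A hedged sketch of the differentiable-view tracking described above, as it would look through these Java bindings (assuming the usual generated Tensor methods: {@code select}, {@code add_}, {@code sum}, {@code backward}, {@code grad}, and {@code print()} from {@code at::TensorBase::print()}):

import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ViewInplaceSketch {
    public static void main(String[] args) {
        Tensor x = ones(3);
        x.requires_grad_(true);
        Tensor base = x.add(x);          // interior tensor, base == 2*x
        Tensor view = base.select(0, 1); // differentiable view sharing storage
        view.add_(view);                 // in-place op on the view; rebases base's grad_fn
        base.sum().backward();           // gradients flow through the view relation
        x.grad().print();                // {2, 4, 2}: the in-place doubling shows at index 1
    }
}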

- * NOTE [ View + Inplace detection ]
- *
- * We want to detect views followed by inplace operations, as they are often
- * forbidden, to ensure correctness of the computed gradients. But since we
- * only want to notify the user when both happen, we tag the
- * DifferentiableViewMeta when the view is created via the
- * {@code make_variable_*_view()} functions. This tag is then checked by the
- * {@code check_inplace()} function from {@code VariableTypeUtils.h}, which should be
- * called before every inplace operation. To detect cases where other views
- * are modified and this one is rebased by side effect, we also check in
- * {@code VariableHooks::grad_fn()}.
-
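The ambiguous GradMode case that this tag guards against looks roughly like the following through the Java bindings; a minimal sketch, assuming the generated {@code NoGradGuard} RAII wrapper (closeable like any JavaCPP Pointer) and the usual Tensor methods:

import org.bytedeco.pytorch.NoGradGuard;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class NoGradViewSketch {
    public static void main(String[] args) {
        Tensor base = ones(3);              // requires_grad may be true or false here
        Tensor view;
        try (NoGradGuard guard = new NoGradGuard()) {
            view = base.narrow(0, 1, 1);    // view tagged with CreationMeta::NO_GRAD_MODE
        }
        base.requires_grad_(true);
        view.copy_(ones(1));                // the ambiguous case: rejected with a warning/error
    }
}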

- * Flag that gives more information about when this view was created:
- * - IN_CUSTOM_FUNCTION should be set when the view is created inside a custom
- *   autograd Function and is returned.
- * - NO_GRAD_MODE should be set when a view is created while GradMode is
- *   disabled.
- * - MULTI_OUTPUT_NODE should be set when a Node created by codegen code
- *   returns
- *   multiple differentiable views.
- * - INFERENCE_MODE should be set when a view of a normal tensor is created in
- *   InferenceMode.
- * - DEFAULT is for all other cases. */
-@Namespace("torch::autograd") public enum CreationMeta {
-  DEFAULT((byte)(0)),
-  IN_CUSTOM_FUNCTION((byte)(1)),
-  MULTI_OUTPUT_NODE((byte)(2)),
-  NO_GRAD_MODE((byte)(3)),
-  INFERENCE_MODE((byte)(4));

-    public final byte value;
-    private CreationMeta(byte v) { this.value = v; }
-    private CreationMeta(CreationMeta e) { this.value = e.value; }
-    public CreationMeta intern() { for (CreationMeta e : values()) if (e.value == value) return e; return this; }
-    @Override public String toString() { return intern().name(); }
-}

+/** A {@code StreamDataset} represents a dataset that is a potentially infinite
+ *  stream. It takes as batch index only a number, which is the batch size, and
+ *  yields that many elements from the stream. */
+ // namespace datasets
+ // namespace data
+ // namespace torch


+// Parsed from torch/data/datasets/stateful.h

+// #pragma once

+// #include
+// #include

+// #include
+// #include
+ // namespace serialize
+ // namespace torch
+// Targeting ../ChunkStatefulDataset.java



+/** Serializes a stateful dataset to {@code OutputArchive}. */

+/** Deserializes a stateful dataset from an {@code InputArchive}.
*/ + + // namespace datasets + // namespace data + // namespace torch + + +// Parsed from torch/data/samplers/custom_batch_request.h + +// #pragma once + +// #include +// #include +// Targeting ../CustomBatchRequest.java + + + // namespace samplers + // namespace data + // namespace torch + + +// Parsed from torch/data/samplers/distributed.h + +// #pragma once +// #include +// #include +// #include +// #include + // namespace serialize + // namespace torch +// Targeting ../DistributedSampler.java -//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Variable Implementation -//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Factory Functions -//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Targeting ../DistributedRandomSampler.java -/** Creates a {@code Variable} that is a *view* of another (*base*) variable. - * The {@code gradient_edge} is an optional (gradient_function, input_number) pair. - * {@code is_differentiable} is a bool that specifies whether this view is - * differentiable, i.e., whether the relation should be tracked by autograd. - * See NOTE [ Autograd View Variables ] for details. -

- * NOTE: {@code allow_tensor_metadata_change} is set to true by default, because - * there are a lot of call sites to these factory functions that need to change - * the variable's size or storage afterwards, and they don't expect the - * original tensor (where the variable is created from) to be updated. Setting - * {@code allow_tensor_metadata_change_} to false by default would unnecessarily - * prevent those changes from happening and is undesirable. */ -// See NOTE [ Autograd View Variables ] for details. -// Differentiable view. Track history with DifferentiableViewMeta. -@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_differentiable_view( - @Const @ByRef Tensor data, - @ByVal @Cast("c10::optional*") Pointer backward_info, - @ByVal @Cast("c10::optional*") Pointer forward_info, - @Cast("bool") boolean shared_view_info, - CreationMeta creation_meta, - @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); -@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_differentiable_view( - @Const @ByRef Tensor data, - @ByVal @Cast("c10::optional*") Pointer backward_info, - @ByVal @Cast("c10::optional*") Pointer forward_info, - @Cast("bool") boolean shared_view_info, - CreationMeta creation_meta); -@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_differentiable_view( - @Const @ByRef Tensor data, - @ByVal @Cast("c10::optional*") Pointer backward_info, - @ByVal @Cast("c10::optional*") Pointer forward_info, - @Cast("bool") boolean shared_view_info, - @Cast("torch::autograd::CreationMeta") byte creation_meta, - @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); -@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_differentiable_view( - @Const @ByRef Tensor data, - @ByVal @Cast("c10::optional*") Pointer backward_info, - @ByVal @Cast("c10::optional*") Pointer forward_info, - @Cast("bool") boolean shared_view_info, - @Cast("torch::autograd::CreationMeta") byte creation_meta); +// Targeting ../DistributedSequentialSampler.java -// See NOTE [ Autograd View Variables ] for details. -// Non-differentiable view. Just share version counter. -/// -@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_non_differentiable_view( - @ByVal @Cast("torch::autograd::Variable*") Tensor base, - @Const @ByRef Tensor data, - @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); -@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable_non_differentiable_view( - @ByVal @Cast("torch::autograd::Variable*") Tensor base, - @Const @ByRef Tensor data); -/** Creates a {@code Variable} from the given {@code Tensor}, copying its underlying - * {@code TensorImpl}. {@code requires_grad} should be set only for leaves, and determines - * whether the {@code Variable} will accumulate gradients. NOTE: {@code data} must *not* be - * a {@code Variable} already. Its dynamic type *must* be {@code Tensor}. - * - * TODO: Eliminate this function as much as possible, as it can be expressed - * more clearly as detach() or a no-op in most call sites (especially when - * there is only one use of the variable). 
*/ -@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable( - @ByVal Tensor data, - @Cast("bool") boolean requires_grad/*=false*/, - @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); -@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable( - @ByVal Tensor data); + // namespace samplers + // namespace data + // namespace torch -/** Creates a {@code Variable} from the given {@code Tensor}, copying its underlying - * {@code TensorImpl}. {@code gradient_edge} should be a (function, input_nr) pair - * specifying the function in the autograd graph, and what particular input of - * that function, this variable is connected to. */ -@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable( - @ByVal Tensor data, - @ByVal Edge gradient_edge, - @Cast("bool") boolean allow_tensor_metadata_change/*=true*/); -@Namespace("torch::autograd") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor make_variable( - @ByVal Tensor data, - @ByVal Edge gradient_edge); -@Namespace("torch::autograd::utils") public static native @Cast("bool") boolean has_same_meta(@Cast("const torch::autograd::Variable*") @ByRef Tensor base, @Cast("const torch::autograd::Variable*") @ByRef Tensor other); +// Parsed from torch/data/samplers/sequential.h - // namespace utils - // namespace autograd +// #pragma once + +// #include +// #include +// #include + +// #include +// #include + // namespace serialize // namespace torch +// Targeting ../SequentialSampler.java -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Parsed from torch/csrc/autograd/function.h + // namespace samplers + // namespace data + // namespace torch + + +// Parsed from torch/csrc/api/include/torch/imethod.h // #pragma once +// #include +// #include +// Targeting ../IMethod.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include + // namespace torch + + +// Parsed from torch/csrc/jit/ir/attributes.h + +// #pragma once +// #include // #include -// #include // #include -// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") -// #endif +// #include +// #include -// Custom deleter to prevent stack overflows. -@Namespace("torch::autograd") public static native void deleteNode(Node function); -// Targeting ../NodeGuard.java +// #include +@Namespace("torch::jit") @MemberGetter public static native int max_tensor_display_size(); +@Name("torch::jit::AttributeKind") public enum JitAttributeKind { + f(0), + fs(1), + c(2), + cs(3), + i(4), + is(5), + s(6), + ss(7), + t(8), + ts(9), + g(10), + gs(11), + ty(12), + tys(13), + ival(14); -// Return the Node currently being evaluated (if any) -// This is only set during the backward pass while a Node is being -// executed. 
-@Namespace("torch::autograd") public static native @SharedPtr Node get_current_node(); -// Targeting ../Node.java + public final int value; + private JitAttributeKind(int v) { this.value = v; } + private JitAttributeKind(JitAttributeKind e) { this.value = e.value; } + public JitAttributeKind intern() { for (JitAttributeKind e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +@Namespace("torch::jit") public static native @Cast("const char*") BytePointer toString(JitAttributeKind kind); +// Targeting ../AttributeValue.java -// Targeting ../TraceableFunction.java +// Targeting ../GraphAttr.java +// Targeting ../GraphsAttr.java -//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Associated Free Nodes -//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Implementation of `collect_next_edges` (see below). - // namespace detail -/** Create an {@code Edge} between the given {@code variable} and the {@code function}, which is - * assumed to be the gradient function of this variable (i.e. the function - * through which this variable is backpropagated during the backward pass). - * This sets the {@code grad_fn} property of the {@code variable}. This function assumes - * that the {@code Variable} is a new input to the gradient function and its - * {@code input_nr} thus equal to {@code function->num_inputs()}. Additionally, it - * increments the {@code Node}'s number of inputs by one. Approximately - * equivalent to {@code variable.set_gradient_edge(function, - * function->add_input_metadata(variable.dispatch_type(), variable.sizes()))}. - * If you don't want the {@code Node}'s {@code num_inputs} to be incremented, use - * {@code set_gradient_edge} directly. */ -@Namespace("torch::autograd") public static native void create_gradient_edge( - @Cast("torch::autograd::Variable*") @ByRef Tensor variable, - @SharedPtr Node function); + // namespace jit + // namespace torch -/** Return true if any of the variables in the list require a gradient. */ -@Namespace("torch::autograd") public static native @Cast("bool") boolean any_variable_requires_grad(@Cast({"", "std::vector"}) @StdMove TensorVector variables); -/** Return the next edges of all the given variables, or tuples of variables. */ - // namespace autograd - // namespace torch +// Parsed from torch/csrc/jit/ir/graph_node_list.h +// #pragma once +// #include -// Parsed from torch/csrc/autograd/custom_function.h +// Intrusive doubly linked lists with sane reverse iterators. +// The header file is named generic_graph_node_list.h because it is ONLY +// used for Graph's Node lists, and if you want to use it for other +// things, you will have to do some refactoring. +// +// At the moment, the templated type T must support a few operations: +// +// - It must have a field: T* next_in_graph[2] = { nullptr, nullptr }; +// which are used for the intrusive linked list pointers. +// +// - It must have a method 'destroy()', which removes T from the +// list and frees a T. +// +// In practice, we are only using it with Node and const Node. 'destroy()' +// needs to be renegotiated if you want to use this somewhere else. +// +// Regardless of the iteration direction, iterators always physically point +// to the element they logically point to, rather than +// the off-by-one behavior for all standard library reverse iterators like +// std::list. 
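The shape of this structure can be sketched in self-contained Java. Every name below ({@code ListNode}, {@code IntrusiveList}, {@code nextInGraph}) is illustrative only and is not part of the generated bindings; the sentinel layout follows the visualization in the paragraphs just below:

final class ListNode {
    static final int NEXT = 0, PREV = 1;            // cf. kNextDirection / kPrevDirection
    final ListNode[] nextInGraph = new ListNode[2]; // intrusive links, cf. T* next_in_graph[2]
    final String name;
    ListNode(String name) { this.name = name; }

    // Unlink this node: the list-maintenance half of 'destroy()'.
    void destroy() {
        nextInGraph[PREV].nextInGraph[NEXT] = nextInGraph[NEXT];
        nextInGraph[NEXT].nextInGraph[PREV] = nextInGraph[PREV];
        nextInGraph[NEXT] = nextInGraph[PREV] = null;
    }
}

final class IntrusiveList {
    final ListNode head = new ListNode("HEAD");     // sentinel
    final ListNode tail = new ListNode("TAIL");     // sentinel

    IntrusiveList() {
        head.nextInGraph[ListNode.NEXT] = tail;     // HEAD -> TAIL when empty
        tail.nextInGraph[ListNode.PREV] = head;
        tail.nextInGraph[ListNode.NEXT] = head;     // the circular wrap link
        head.nextInGraph[ListNode.PREV] = tail;
    }

    // Splice n in front of pos; inserting "after TAIL but before HEAD" is the
    // error case called out below.
    void insertBefore(ListNode pos, ListNode n) {
        ListNode prev = pos.nextInGraph[ListNode.PREV];
        prev.nextInGraph[ListNode.NEXT] = n;
        n.nextInGraph[ListNode.PREV] = prev;
        n.nextInGraph[ListNode.NEXT] = pos;
        pos.nextInGraph[ListNode.PREV] = n;
    }

    void append(ListNode n) { insertBefore(tail, n); }
}

public class GraphNodeListSketch {
    public static void main(String[] args) {
        IntrusiveList list = new IntrusiveList();
        ListNode a = new ListNode("first"), b = new ListNode("second");
        list.append(a);
        list.append(b);
        a.destroy(); // O(1) unlink, no traversal needed
    }
}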
-// #pragma once +// The list is includes two sentinel nodes, one at the beginning and one at the +// end with a circular link between them. It is an error to insert nodes after +// the end sentinel node but before the beginning node: -// #include -// #include -// #include -// #include -// #include -// #include +// Visualization showing only the next() links: +// HEAD -> first -> second -> ... -> last -> TAIL +// ^------------------------------------------ -@Namespace("torch::autograd") public static native @ByVal TensorOptionalVector _wrap_outputs( - @Cast({"", "std::vector"}) @StdMove TensorVector input_vars, - @Const @ByRef TensorImplSet non_differentiable, - @Const @ByRef TensorImplSet dirty_inputs, - @Const @ByVal TensorOptionalArrayRef raw_outputs, - @SharedPtr Node cdata, - @ByVal @Cast("torch::autograd::_jvp_fn_t*") Pointer jvp_user_function); +// Visualization showing only the prev() links: +// HEAD <- first <- second <- ... <- last <- TAIL +// ------------------------------------------^ -@Namespace("torch::autograd") public static native void check_variable_result( - @Const @ByRef TensorBase original, - @Const @ByRef TensorBase result, - @StdString BytePointer hook_name); -@Namespace("torch::autograd") public static native void check_variable_result( - @Const @ByRef TensorBase original, - @Const @ByRef TensorBase result, - @StdString String hook_name); +@Namespace("torch::jit") @MemberGetter public static native int kNextDirection(); +public static final int kNextDirection = kNextDirection(); +@Namespace("torch::jit") @MemberGetter public static native int kPrevDirection(); +public static final int kPrevDirection = kPrevDirection(); +// Targeting ../graph_node_list_iterator.java -// Get the return type of the forward function of the custom Function class X -/// -/// -/// -/// -/// +// Targeting ../graph_node_list.java -/** To use custom autograd operations, implement a Function subclass with - * static forward and backward functions: - * - * {@code forward} can take as many arguments as you want and should return either a - * variable list or a Variable. Use of any direct Variable arguments will be - * registered in the graph but no vectors/sets or any other data structures - * will be traversed. You can use c10::optional as one of the arguments - * and it will be registered as a variable in the graph if the argument has a - * value. It should take a pointer to {@code torch::autograd::AutogradContext} as the - * first argument. Variables can be saved in the {@code ctx} using - * {@code ctx->save_for_backward} - * (see {@code torch::autograd::AutogradContext::save_for_backward}) and other data - * can be saved in the {@code ctx->saved_data} map - * (see {@code torch::autograd::AutogradContext::saved_data}) - * in the form of {@code } pairs. - * - * {@code backward} should take a pointer to {@code torch::autograd::AutogradContext} - * and a variable list containing as many Variables as there were outputs from - * {@code forward} as arguments. It should return as many Variables as there were - * inputs with each of them containing the gradient w.r.t. its corresponding - * input. Variables saved in {@code forward} can be accessed with - * {@code ctx->get_saved_variables} (see - * {@code torch::autograd::AutogradContext::get_saved_variables}) and other saved - * data can be accessed from {@code ctx->saved_data}. - * - * For example: - *

{@code
- *  class MyFunction : public Function {
- *    public:
- *    static variable_list forward(AutogradContext *ctx, int n, Variable var) {
- *       // Save data for backward in context
- *       ctx->saved_data["n"] = n;
- *       var.mul_(2);
- *       // Mark var as modified by inplace operation
- *       ctx->mark_dirty({var});
- *       return {var};
- *    }
- * 
- *    static variable_list backward(AutogradContext *ctx, variable_list
- *    grad_output) {
- *       // Use data saved in forward
- *       auto n = ctx->saved_data["n"].toInt();
- *       return {grad_output[0]*n};
- *    }
- *  };
- *  }
- * - * To use {@code MyFunction}: - *
{@code
- *  Variable x;
- *  auto y = MyFunction::apply(6, x);
- *  // Example backward call
- *  y[0].sum().backward();
- *  }
*/ -// Targeting ../AutogradContext.java -// Targeting ../VariableInfo.java + // namespace jit + // namespace torch + + // namespace std +// Parsed from torch/csrc/jit/frontend/source_range.h -// CppNode is the Node in the autograd graph that represents the user defined -// backward function for Function. Calls to CppNode::apply are forward to -// T::backward(). +// #pragma once +// #include +// #include -@Namespace("torch::autograd") public static native @ByVal TensorOptionalVector to_optional(@Cast("torch::autograd::Variable*") @ByRef Tensor output); +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../SourceRangeUnpickler.java -@Namespace("torch::autograd") public static native @ByVal TensorOptionalVector to_optional(@ByRef TensorVector output); +// Targeting ../StringCordView.java -// The logic here is the same as PyNode::apply, so changes to it should be done -// in both the places +// Targeting ../Source.java + + +// Targeting ../SourceRange.java +// OwnedSourceRange is just like a SourceRange except that it owns a `Source` +// instead of `Source`. Thus OwnedSourceRange owns a copy of source text. +// Targeting ../SourceRangeHasher.java +// Targeting ../StackEntry.java - // namespace autograd +@Namespace("torch::jit") public static native void format_stack_trace( + @Cast("std::ostream*") @ByRef Pointer out, + @Const @ByRef StackEntryVector entries); + +@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef SourceRange range); + +// A pair of (byte offset, SourceRange) describing a specific segment +// of the output stream + + // namespace jit // namespace torch + // namespace std -// Parsed from torch/csrc/autograd/autograd.h +// Parsed from torch/csrc/jit/ir/scope.h // #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kModuleInstanceInfo(); -// #include + // namespace utils -/** Computes the sum of gradients of given tensors with respect to graph leaves. - * - * The graph is differentiated using the chain rule. If any of {@code }tensors{@code } - * are non-scalar (i.e. their data has more than one element) and require - * gradient, then the Jacobian-vector product would be computed, in this case - * the function additionally requires specifying {@code grad_tensors}. It should be a - * sequence of matching length, that contains the "vector" in the - * Jacobian-vector product, usually the gradient of the differentiated function - * w.r.t. corresponding tensors - * ({@code torch::Tensor()} is an acceptable value for all tensors that don't need - * gradient tensors). - * - * This function accumulates gradients in the leaves - you might need to zero - * them before calling it. - * - * @param tensors Tensors of which the derivative will be computed. - * @param grad_tensors The "vector" in the Jacobian-vector product, usually - * gradients - * w.r.t. each element of corresponding tensors. {@code torch::Tensor()} values - * can be specified for scalar Tensors or ones that don't require grad. If - * a {@code torch::Tensor()} value would be acceptable for all grad_tensors, then - * this argument is optional. - * @param retain_graph If {@code false}, the graph used to compute the grad will be - * freed. 
- * Note that in nearly all cases setting this option to {@code true} is not - * needed and often can be worked around in a much more efficient way. - * Defaults to the value of {@code create_graph}. - * @param create_graph If {@code true}, graph of the derivative will be constructed, - * allowing - * to compute higher order derivative products. Defaults to {@code false}. - * @param inputs Inputs w.r.t. which the gradient will be accumulated into - * {@code at::Tensor::grad}. All other Tensors will be ignored. If not provided, - * the gradient is accumulated into all the leaf Tensors that were used to - * compute param {@code tensors}. */ -// When inputs are provided and a given input is not a leaf, -// the current implementation will call its grad_fn (even though it is not -// strictly needed to get this gradients). It is an implementation detail -// on which the user should not rely. See -// https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for -// more details. +// Scope is a node of a trie that represents the tree of nested scopes. +// Individual scopes are pushed and popped from Graph, which holds a +// pointer to the current scope. Each Node in Graph holds a pointer +// to the scope that was current when the node was created. +// The trie never needs to shrink, it only grows until it is disposed +// of when Graph is deallocated. Hence, pointers to scopes held by nodes +// will always be valid as long as Graph is alive. +// Targeting ../Scope.java -/// -/// -@Namespace("torch::autograd") public static native void backward( - @Cast({"", "std::vector"}) @StdMove TensorVector tensors, - @Cast({"", "std::vector"}) @StdMove TensorVector grad_tensors/*={}*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, - @Cast("bool") boolean create_graph/*=false*/, - @Cast({"", "std::vector"}) @StdMove TensorVector inputs/*={}*/); -@Namespace("torch::autograd") public static native void backward( - @Cast({"", "std::vector"}) @StdMove TensorVector tensors); -/** Computes and returns the sum of gradients of outputs with respect to the - * inputs. - * - * {@code }grad_outputs{@code } should be a sequence of length matching {@code }output{@code } - * containing the "vector" in Jacobian-vector product, usually the pre-computed - * gradients w.r.t. each of the outputs. If an output doesn't require_grad, - * then the gradient can be {@code }torch::Tensor(){@code }). - * - * @param outputs outputs of the differentiated function. - * @param inputs Inputs w.r.t. which the gradient will be - * returned (and not accumulated into {@code }at::Tensor::grad{@code }). - * @param grad_outputs The "vector" in the Jacobian-vector product. - * Usually gradients w.r.t. each output. {@code torch::Tensor()} values can be - * specified for scalar Tensors or ones that don't require grad. If a - * {@code torch::Tensor()} value would be acceptable for all grad_tensors, then - * this argument is optional. Default: {@code {}}. - * @param retain_graph If {@code }false{@code }, the graph used to compute the grad - * will be freed. Note that in nearly all cases setting this option to - * {@code }true{@code } is not needed and often can be worked around in a much more - * efficient way. Defaults to the value of {@code }create_graph{@code }. - * @param create_graph If {@code }true{@code }, graph of the derivative will - * be constructed, allowing to compute higher order derivative products. - * Default: {@code }false{@code }. 
- * @param allow_unused If {@code }false{@code }, specifying inputs that were not - * used when computing outputs (and therefore their grad is always zero) - * is an error. Defaults to {@code }false{@code }. */ -@Namespace("torch::autograd") public static native @Cast({"", "std::vector"}) @StdMove TensorVector grad( - @Cast({"", "std::vector"}) @StdMove TensorVector outputs, - @Cast({"", "std::vector"}) @StdMove TensorVector inputs, - @Cast({"", "std::vector"}) @StdMove TensorVector grad_outputs/*={}*/, - @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional retain_graph, - @Cast("bool") boolean create_graph/*=false*/, - @Cast("bool") boolean allow_unused/*=false*/); -@Namespace("torch::autograd") public static native @Cast({"", "std::vector"}) @StdMove TensorVector grad( - @Cast({"", "std::vector"}) @StdMove TensorVector outputs, - @Cast({"", "std::vector"}) @StdMove TensorVector inputs); +// Targeting ../ModuleInstanceInfo.java -/** Creates a new dual level and returns its index. This level index should then - * be used to call into the other functions below. This API supports entering a - * new level before the previous one is exited. We call them nested forward AD - * levels. These can be used to compute higher order derivatives. */ -@Namespace("torch::autograd::forward_ad") public static native @Cast("uint64_t") long enter_dual_level(); -/** Exits the given level. This will clear up all the gradients from this level - * and all dual Tensors that had gradients for this level will become regular - * Tensors again. This function can only be used to exit the innermost nesting - * level and so exiting must happen in reverse order compared to the entering - * that was done with the function above. */ -@Namespace("torch::autograd::forward_ad") public static native void exit_dual_level(@Cast("uint64_t") long level); - // namespace forward_ad - // namespace autograd +/** + * InlinedCallStack is an element in a list representing callstack of functions + * that have been inlined. + * + * Each such element holds info about the current callsite (Function and + * SourceRange) and a pointer to the next element in the list. The last element + * in the list represents the innermost function that was inlined. + * + * For instance, if a node has a callstack + * [foo, source_range1] -> [bar, source_range2] + * it means that this node was originally from function 'bar' that was called + * at 'source_range2' in function 'foo' that was called in the current function + * at 'source_range1'. + * + * If a node did not come from any inlined function, its callstack will be + * empty. + * + * The callstack lists only grow, we never remove elements from them, which + * allows us to reuse same elements in different lists. For instance, if we + * inline function 'bar' to 'foo' and then inline 'foo' to two functions 'ham' + * and 'baz', the callstacks would look like: + * + * [baz, source_range3] -- + * \ + * --> [foo, source_range1] -> [bar, source_range2] + * / + * [ham, source_range4] -- + */ +// Targeting ../InlinedCallStack.java + + + +// {source range, node name, InlinedCallStack} +// We store node name because same debug infor will be used for +// profiling as well, so we need to know op names as well. 
+@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kDebugInfoTupleSourceRangeIndex(); +@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kDebugInfoTupleNodeNameIndex(); +@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kDebugInfoTupleInlinedCSIndex(); + // namespace jit // namespace torch -// Parsed from torch/csrc/autograd/generated/VariableType.h +// Parsed from torch/csrc/jit/ir/constants.h + +// #pragma once +// #include +// #include +// #include +// #include +// #include + +// helpers for handling constants in the IR +// - create constant nodes from ints, floats, complex, intlist, Tensors, and +// other types +// - implement primitive constant ops. + +// thrown when insertConstant cannot encode the IValue into a graph + +@Namespace("torch::jit") public static native Value insertConstant( + @ByRef Graph g, + @Const @ByRef IValue val, + @ByVal(nullValue = "c10::optional(c10::nullopt)") SourceRangeOptional loc, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") ScopeOptional scope); +@Namespace("torch::jit") public static native Value insertConstant( + @ByRef Graph g, + @Const @ByRef IValue val); + +// note: prefer g.insertConsant(val, loc) which does exactly the same thing +// this function is only declared/defined here because its implementation is +// closely related to the implementation of prim::Constant that is also in +// constants.cpp. +// +// returns a c10::nullopt if the IValue kind cannot be inserted as a constant +@Namespace("torch::jit") public static native @ByVal ValueOptional tryInsertConstant( + @ByRef Graph g, + @Const @ByRef IValue val, + @ByVal(nullValue = "c10::optional(c10::nullopt)") SourceRangeOptional loc, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") ScopeOptional scope); +@Namespace("torch::jit") public static native @ByVal ValueOptional tryInsertConstant( + @ByRef Graph g, + @Const @ByRef IValue val); + +//////////////////////////////////////////////////////////////////////////////// +// Helper for retrieving constants +//////////////////////////////////////////////////////////////////////////////// + +// attempt to convert a (possibly constant) Value* into an interpreter value +// (IValue). 
returns c10::nullopt if the Value* was not constant +@Namespace("torch::jit") public static native @ByVal IValueOptional toIValue(@Const Value v); -// #pragma once +// if a value is a constant then try to turn into type T using the +// same rules as the interpreter + // namespace jit + // namespace torch -// @generated from ../tools/autograd/templates/VariableType.h -// #include -// #include +// Parsed from torch/csrc/jit/ir/named_value.h -// #include +// #pragma once +// #include +// #include +// #include +// #include +// Targeting ../NamedValue.java -// #include -// #include -// #include // for size_t -// #include // for function -// #include // for unique_ptr -// #include -// #include -// Targeting ../Quantizer.java + // namespace jit + // namespace torch -// This is temporary typedef to enable Quantizer in aten native function API -// we'll remove them when we are actually exposing Quantizer class -// to frontend - @Namespace("torch::autograd::VariableType") public static native @Cast("at::DeprecatedTypeProperties**") @StdVector PointerPointer allCUDATypes(); - @Namespace("torch::autograd::VariableType") public static native @Cast("at::DeprecatedTypeProperties**") @StdVector PointerPointer allCPUTypes(); +// Parsed from torch/csrc/jit/runtime/operator_options.h - - - - +// #pragma once +// #include - // namespace torch::autograd + // namespace jit + // namespace torch -// Parsed from torch/csrc/autograd/generated/variable_factories.h +// Parsed from torch/csrc/jit/runtime/operator.h +// in memory description of all ATen Ops similar to Caffe2 schema +// once C10 exists this can be removed, or stubbed out, but we need +// it now to implement correct semantic checking for script // #pragma once -// @generated from ../tools/autograd/templates/variable_factories.h - -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// #ifndef AT_PER_OPERATOR_HEADERS -// #include -// #else -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #endif +// #include +// #include // #include // #include +// #include +// #include +// #include // #include +// #include +// Targeting ../OperationCreator.java -/** NOTE: Currently {@code torch::tensor(...)} doesn't support mixed data types - * (i.e. {@code torch::tensor({{bool, 2.0}})} doesn't work). 
We might be able to - * support it in the future by iterating over all sub-lists to find - * the largest data type that can represent all of the elements, or by using - * variadic templates. - * - * NOTE: C++ {@code torch::tensor} with a floating-point type or an {@code at::ArrayRef} / {@code std::vector} / - * (nested) braced-init-list of floating-point types always produces a tensor of dtype - * {@code torch::get_default_dtype()}, matching Python {@code torch.tensor} behavior. - * - * NOTE: C++ {@code torch::tensor} with an integer type or an {@code at::ArrayRef} / {@code std::vector} / - * (nested) braced-init-list of integer types always produces a tensor of dtype {@code at::kLong} - * (aka. int64_t), matching Python {@code torch.tensor} behavior. - * - * NOTE: The following dtypes are not supported by {@code torch::tensor} currently: - * - {@code unsigned int} - * - {@code unsigned long int} - * - {@code unsigned long long int} - * - {@code long long int} */ -@Namespace("torch") public static native @ByVal Tensor tensor(@ByVal @Cast("torch::detail::TensorDataContainer*") Pointer tensor_data_container, @Const @ByRef(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal Tensor tensor(@ByVal @Cast("torch::detail::TensorDataContainer*") Pointer tensor_data_container); - -/** A generic deleter function. */ -/** Exposes the given {@code data} as a {@code Tensor} without taking ownership of the - * original data. {@code sizes} should specify the shape of the tensor, {@code strides} the - * stride in each dimension. The {@code deleter} function (a - * {@code std::function}) will be called on the {@code data} when the Tensor - * data would normally be deallocated. The {@code TensorOptions} specify additional - * configuration options for the returned tensor, such as what type to - * interpret the {@code data} as. 
*/ -@Namespace("torch") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, - @Const @ByRef Deleter deleter, - @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); -@Namespace("torch") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @ByRef @Cast("void(*)(void*)") Pointer deleter, - @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); -@Namespace("torch") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, - @ByRef @Cast("void(*)(void*)") long deleter, - @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); -@Namespace("torch") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @Const @ByRef Deleter deleter, - @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); -@Namespace("torch") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef sizes, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef strides, - @ByRef @Cast("void(*)(void*)") Pointer deleter, - @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); -@Namespace("torch") public static native @ByVal Tensor from_blob( - Pointer data, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] strides, - @ByRef @Cast("void(*)(void*)") long deleter, - @Const @ByRef(nullValue = "at::TensorOptions()") TensorOptions options); +// Targeting ../Operator.java -/** Exposes the given {@code data} as a {@code Tensor} without taking ownership of the - * original data. {@code sizes} should specify the shape of the tensor, {@code strides} the - * stride in each dimension. The {@code TensorOptions} - * specify additional configuration options for the returned tensor, such as - * what type to interpret the {@code data} as. */ -/** Exposes the given {@code data} as a {@code Tensor} without taking ownership of the - * original data. {@code sizes} should specify the shape of the tensor. The {@code deleter} - * (a {@code std::function}) function will be called on the {@code data} when - * the Tensor data would normally be deallocated. The {@code TensorOptions} specify - * additional configuration options for the returned tensor, such as what type - * to interpret the {@code data} as. */ -/** Exposes the given {@code data} as a {@code Tensor} without taking ownership of the - * original data. {@code sizes} should specify the shape of the tensor. The - * {@code TensorOptions} specify additional configuration options for the returned - * tensor, such as what type to interpret the {@code data} as. 
*/ +@Namespace("torch::jit") public static native @StdString BytePointer canonicalSchemaString(@Const @ByRef FunctionSchema schema); -@Namespace("torch") public static native @ByVal @Name("_cudnn_init_dropout_state") Tensor torch__cudnn_init_dropout_state(double dropout, @Cast("bool") boolean train, @Cast("int64_t") long dropout_seed, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar end); -@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end); -@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("arange") Tensor torch_arange(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef Scalar step); -@Namespace("torch") public static native @ByVal @Name("bartlett_window") Tensor torch_bartlett_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("bartlett_window") Tensor torch_bartlett_window(@Cast("int64_t") long window_length); -@Namespace("torch") public static native @ByVal @Name("bartlett_window") Tensor torch_bartlett_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("bartlett_window") Tensor torch_bartlett_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -@Namespace("torch") public static native @ByVal @Name("blackman_window") Tensor torch_blackman_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("blackman_window") Tensor torch_blackman_window(@Cast("int64_t") long window_length); -@Namespace("torch") public static native @ByVal @Name("blackman_window") Tensor torch_blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("blackman_window") Tensor torch_blackman_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) 
@StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("empty") Tensor torch_empty(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, double scale/*=1*/, @Cast("int64_t") long zero_point/*=0*/, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("_empty_affine_quantized") Tensor torch__empty_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis); -@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::MemoryFormat::Contiguous)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("_empty_per_channel_affine_quantized") Tensor torch__empty_per_channel_affine_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor scales, @Const @ByRef Tensor zero_points, @Cast("int64_t") long axis); -@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor qtensor); -@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("empty_quantized") Tensor torch_empty_quantized(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor qtensor); -@Namespace("torch") public static native @ByVal @Name("empty_like") Tensor torch_empty_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("empty_like") Tensor torch_empty_like(@Const @ByRef Tensor self); -@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride); -@Namespace("torch") public static 
native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("empty_strided") Tensor torch_empty_strided(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... stride); -@Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n); -@Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n, @Cast("int64_t") long m, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("eye") Tensor torch_eye(@Cast("int64_t") long n, @Cast("int64_t") long m); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Scalar fill_value); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("full") Tensor torch_full(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Scalar fill_value); -@Namespace("torch") public static native @ByVal @Name("full_like") Tensor torch_full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("full_like") Tensor torch_full_like(@Const @ByRef Tensor self, @Const @ByRef Scalar fill_value); -@Namespace("torch") public static native @ByVal @Name("from_file") Tensor 
torch_from_file(@ByVal @Cast("c10::string_view*") Pointer filename, @ByVal(nullValue = "c10::optional(c10::nullopt)") BoolOptional shared, @ByVal(nullValue = "c10::optional(0)") LongOptional size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("from_file") Tensor torch_from_file(@ByVal @Cast("c10::string_view*") Pointer filename); -@Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length); -@Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("hann_window") Tensor torch_hann_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length); -@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha); -@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("hamming_window") Tensor torch_hamming_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double alpha, double beta); -@Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length); -@Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic); -@Namespace("torch") public 
static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("kaiser_window") Tensor torch_kaiser_window(@Cast("int64_t") long window_length, @Cast("bool") boolean periodic, double beta); -@Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("linspace") Tensor torch_linspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); -@Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps, double base/*=10.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("logspace") Tensor torch_logspace(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Cast("int64_t") long steps); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("ones") Tensor torch_ones(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("ones_like") Tensor torch_ones_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("ones_like") Tensor torch_ones_like(@Const @ByRef Tensor self); -@Namespace("torch") public static native @ByVal @Name("scalar_tensor") Tensor torch_scalar_tensor(@Const @ByRef Scalar s, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("scalar_tensor") Tensor torch_scalar_tensor(@Const @ByRef Scalar s); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("rand") Tensor torch_rand(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("rand_like") Tensor torch_rand_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("rand_like") Tensor torch_rand_like(@Const @ByRef Tensor self); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randint") Tensor torch_randint(@Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long high); -@Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("randint_like") Tensor torch_randint_like(@Const @ByRef Tensor self, @Cast("int64_t") long low, @Cast("int64_t") long high); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randn") Tensor torch_randn(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal GeneratorOptional generator, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("randn_like") Tensor torch_randn_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("randn_like") Tensor torch_randn_like(@Const @ByRef Tensor self); -@Namespace("torch") public static native @ByVal @Name("randperm") Tensor torch_randperm(@Cast("int64_t") long n, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randperm") Tensor torch_randperm(@Cast("int64_t") long n); -@Namespace("torch") public static native @ByVal @Name("randperm") Tensor 
torch_randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("randperm") Tensor torch_randperm(@Cast("int64_t") long n, @ByVal GeneratorOptional generator); -@Namespace("torch") public static native @ByVal @Name("range") Tensor torch_range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @Const @ByRef(nullValue = "at::Scalar(1)") Scalar step, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("range") Tensor torch_range(@Const @ByRef Scalar start, @Const @ByRef Scalar end, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal DimnameListOptional names); -@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_efficientzerotensor") Tensor torch__efficientzerotensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("zeros") Tensor torch_zeros(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("zeros_like") Tensor torch_zeros_like(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("zeros_like") Tensor torch_zeros_like(@Const @ByRef Tensor self); -@Namespace("torch") public static native @ByVal @Name("sparse_compressed_tensor") Tensor torch_sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_compressed_tensor") Tensor torch_sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_csr_tensor") Tensor torch_sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_csr_tensor") Tensor torch_sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_csc_tensor") Tensor torch_sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_csc_tensor") Tensor torch_sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_bsr_tensor") Tensor torch_sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_bsr_tensor") Tensor torch_sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_bsc_tensor") Tensor torch_sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_bsc_tensor") Tensor torch_sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal 
@Name("sparse_compressed_tensor") Tensor torch_sparse_compressed_tensor(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_csr_tensor") Tensor torch_sparse_csr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_csc_tensor") Tensor torch_sparse_csc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_bsr_tensor") Tensor torch_sparse_bsr_tensor(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_bsc_tensor") Tensor torch_sparse_bsc_tensor(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_compressed_tensor_unsafe") Tensor torch__sparse_compressed_tensor_unsafe(@Const @ByRef Tensor compressed_indices, @Const @ByRef Tensor plain_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_csr_tensor_unsafe") Tensor torch__sparse_csr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_csc_tensor_unsafe") Tensor torch__sparse_csc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsr_tensor_unsafe") Tensor torch__sparse_bsr_tensor_unsafe(@Const @ByRef Tensor crow_indices, @Const @ByRef Tensor col_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_bsc_tensor_unsafe") Tensor torch__sparse_bsc_tensor_unsafe(@Const @ByRef Tensor ccol_indices, @Const @ByRef Tensor row_indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("sparse_coo_tensor") Tensor torch_sparse_coo_tensor(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_unsafe") Tensor torch__sparse_coo_tensor_unsafe(@Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
size); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims") Tensor torch__sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims") Tensor torch__sparse_coo_tensor_with_dims(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_sparse_coo_tensor_with_dims_and_tensors") Tensor torch__sparse_coo_tensor_with_dims_and_tensors(@Cast("int64_t") long sparse_dim, @Cast("int64_t") long dense_dim, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @Const @ByRef Tensor indices, @Const @ByRef Tensor values, @ByVal TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("_to_copy") Tensor torch__to_copy(@Const @ByRef Tensor self, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @Cast("bool") boolean non_blocking/*=false*/, @ByVal(nullValue = "c10::optional(c10::nullopt)") MemoryFormatOptional memory_format); -@Namespace("torch") public static native @ByVal @Name("_to_copy") Tensor torch__to_copy(@Const @ByRef Tensor self); -@Namespace("torch") public static native @ByVal @Name("tril_indices") Tensor torch_tril_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("tril_indices") Tensor torch_tril_indices(@Cast("int64_t") long row, @Cast("int64_t") long col); -@Namespace("torch") public static native @ByVal @Name("triu_indices") Tensor torch_triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col, @Cast("int64_t") long offset/*=0*/, @ByVal(nullValue = "at::TensorOptions(at::kLong)") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("triu_indices") Tensor torch_triu_indices(@Cast("int64_t") long row, @Cast("int64_t") long col); -@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, @ByVal(nullValue = "c10::optional(c10::nullopt)") GeneratorOptional generator, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("normal") Tensor torch_normal(double mean, double std, @ByVal @Cast({"int64_t*", 
"c10::ArrayRef", "std::vector&"}) @StdVector long... size); -@Namespace("torch") public static native @ByVal @Name("fft_fftfreq") Tensor torch_fft_fftfreq(@Cast("int64_t") long n, double d/*=1.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("fft_fftfreq") Tensor torch_fft_fftfreq(@Cast("int64_t") long n); -@Namespace("torch") public static native @ByVal @Name("fft_rfftfreq") Tensor torch_fft_rfftfreq(@Cast("int64_t") long n, double d/*=1.0*/, @ByVal(nullValue = "at::TensorOptions{}") TensorOptions options); -@Namespace("torch") public static native @ByVal @Name("fft_rfftfreq") Tensor torch_fft_rfftfreq(@Cast("int64_t") long n); +@Namespace("torch::jit") public static native @Const @ByVal OperatorVector getAllOperators(); +@Namespace("torch::jit") public static native @Const @ByRef OperatorVector getAllOperatorsFor( + @ByVal Symbol name); - // namespace torch +// given a operator with an overload name, find the specific operator related to +// it, may return nullptr if no operator exists. +@Namespace("torch::jit") public static native @SharedPtr("torch::jit::Operator") @ByVal Operator findOperatorFor( + @Const @ByRef OperatorName full_name); +@Namespace("torch::jit") public static native @ByVal SymbolVector findSimilarOperators(@ByVal Symbol input_op); -// Parsed from torch/csrc/jit/frontend/function_schema_parser.h +@Namespace("torch::jit") public static native void registerOperator(@ByRef(true) Operator op); +@Namespace("torch::jit") public static native void deregisterOperator(@Const @ByRef FunctionSchema schema); -// #pragma once +// XXX: this function is meant to be used with string literals only! +@Namespace("torch::jit") public static native @SharedPtr("torch::jit::Operator") @ByVal Operator getOperatorForLiteral( + @Cast("const char*") BytePointer signature); +@Namespace("torch::jit") public static native @SharedPtr("torch::jit::Operator") @ByVal Operator getOperatorForLiteral( + String signature); -// #include -// #include -// #include -// #include +// Ensure the thing that registers c10 ops is defined. +// Otherwise, our registry will not have c10 ops. You can run into this +// scenario if you're querying registered ops during static init. +// +// This fn is defined in register_c10_ops.cpp +@Namespace("torch::jit") public static native void ensure_c10_registerer_defined(); +// Used to assert that unschematized operators have an analysis method written +@Namespace("torch::jit") public static native @Cast("bool") boolean aliasAnalysisHasSpecialCaseFor(@ByVal Symbol sym); -@Namespace("torch::jit") public static native @ByVal FunctionSchema parseSchema(@StdString BytePointer schema); -@Namespace("torch::jit") public static native @ByVal FunctionSchema parseSchema(@StdString String schema); -@Namespace("torch::jit") public static native @ByVal OperatorName parseName(@StdString BytePointer name); -@Namespace("torch::jit") public static native @ByVal OperatorName parseName(@StdString String name); +// A factory function to generate an optional operator. It has two +// instantiations depending on the template bool arg value. The arg can be a +// compile-time function for the selective op registration based on schema +// string. 
// namespace jit // namespace torch -// Parsed from torch/csrc/jit/frontend/name_mangler.h +// Parsed from torch/csrc/utils/schema_info.h // #pragma once -// #include -// #include -// Targeting ../NameMangler.java - +// #include +// #include +// Targeting ../SchemaInfo.java - // namespace jit + // namespace utils // namespace torch -// Parsed from torch/csrc/jit/frontend/parser_constants.h +// Parsed from ATen/core/enum_type.h // #pragma once -@Namespace("torch::jit") public static native @Cast("const char*") BytePointer valid_single_char_tokens(); public static native void valid_single_char_tokens(BytePointer setter); - // namespace jit - // namespace torch +// #include -// Parsed from torch/csrc/jit/frontend/source_range.h +// #include +// Targeting ../EnumType.java + + + + // namespace c10 + + +// Parsed from torch/csrc/jit/ir/ir.h // #pragma once + +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include // #include // #include -// #include +// #include // #include -// #include -// #include -// #include -// #include -// Targeting ../StringCordView.java +// #include +// #include +// Forward declare, the real meat is in python_ir.cpp +@Namespace("torch::jit::utils") public static native @StdString BytePointer getNodesModuleHierarchy(@Const @ByRef JitNode n); -// Targeting ../Source.java +// Targeting ../AliasDb.java -// Targeting ../SourceRange.java +// #define C10_USING(T) using ::c10::T; +// #undef C10_USING + +// #define C10_USING(T) using ::c10::T##Ptr; +// #undef C10_USING -// Targeting ../OwnedSourceRange.java -// Targeting ../SourceRangeHasher.java +// #if !defined(USE_ROCM) +// #endif + // namespace cuda +// A Graph represents one "function" of computation. +// It uses a simple ownership model where the graph owns all the nodes inside +// it. All references inside the graph are raw pointers. Destroying the Graph +// will invalidate any pointers to nodes in the graph. -// Targeting ../StackEntry.java +// Node is the base class of the IR graph. It represents one computation +// and dependencies on a list of Values. The "prim-ops", so to speak. + +// A Value represents an input or output to node that is either a +// Tensor or an opaque Handle object, as determined by type(). 
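The ir.h comments above describe the ownership model of the JIT IR: the Graph owns all of its nodes, and raw JitNode/Value pointers stay valid only while that Graph is alive. A rough sketch of inspecting a graph from Java, assuming the preset maps torch::jit::load, Module::get_method and Method::graph under their C++ names, that the @StdVector result of findAllNodes (bound further below in this header) reports its length via capacity(), and with model.pt as a hypothetical TorchScript archive:

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class GraphInspect {
    public static void main(String[] args) {
        // Load a TorchScript module saved elsewhere (hypothetical path).
        JitModule module = load("model.pt");

        // The Method holds a shared_ptr to its Graph, which keeps the graph,
        // and therefore all node/value pointers into it, alive.
        Graph graph = module.get_method("forward").graph();

        // Search the graph for nodes of one kind, recursing into sub-blocks.
        PointerPointer relus = findAllNodes(graph, Symbol.fromQualString("aten::relu"), true);
        System.out.println("aten::relu nodes: " + relus.capacity());
    }
}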
+@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Graph g); +@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef JitNode n); + +// A list of nodes, with inputs and outputs +// Targeting ../Use.java -@Namespace("torch::jit") public static native void format_stack_trace( - @Cast("std::ostream*") @ByRef Pointer out, - @Const @ByRef StackEntryVector entries); -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef SourceRange range); -// Targeting ../TaggedRange.java +// Note [User node does not uniquely identify use] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// A while back, we wrote some code manipulating uses that looked like this: +// +// for (auto& use : used_val->uses_) { +// if (use.user == this_node) { +// use.offset += 1; +// break; +// } +// } +// +// This code is trying to find a particular use (our node's use) to update it. +// However, it's wrong: there may be *multiple* uses of a value %x in a node, +// as might be the case in this IR: +// +// %y = Add %x %x +// +// In this case, there are two uses of %x whose user is the node 'Add %x %x'. +// So, "use induced by this node" is not a well-formed concept. +// +// If you are looking for "use induced by an input", it's best to use +// findUseForInput() to get it. +// the list types are intentionally simple, but we type-def +// them here so if we need to change them, refactoring will be easier +// Targeting ../BlockWrap.java - // namespace jit - // namespace torch - // namespace std +// Targeting ../JitNodeWrap.java -// Parsed from torch/csrc/jit/frontend/sugared_value.h +// Targeting ../ValueWrap.java -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../SugaredValue.java +// Targeting ../Value.java -// Targeting ../SimpleValue.java +// Targeting ../JitNode.java -// Targeting ../BuiltinFunction.java +// Targeting ../Block.java -// Targeting ../SugaredTupleValue.java +// Targeting ../Graph.java -// Targeting ../BuiltinModule.java +/** \brief An utility class for setting temporary insertion points. + * + * When an object of this class is created, it stores the current insertion + * point, sets the new one, and restores the original insertion point when the + * object is destroyed. + */ -// Targeting ../ClassValue.java +/** \brief An utility class for setting temporary scopes. + * + * When an object of this class is created, it stores the current scope, sets + * the new one, and restores the original scope when the object is destroyed. 
+ */ +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) -// Targeting ../NamedTupleConstructor.java -// Targeting ../FunctionValue.java -// Targeting ../ClosureValue.java -// Targeting ../MethodValue.java +/************* All nodes not required to be defined before Graph **************/ +// Targeting ../ProfileIValueOp.java -// Targeting ../PrintValue.java +// Targeting ../PythonOp.java -// Targeting ../CastValue.java -// Targeting ../TensorCastValue.java +@Namespace("torch::jit") public static native void LintGraph(@Const @SharedPtr("torch::jit::Graph") @ByRef Graph graph); +@Namespace("torch::jit") public static native @ByVal ValueArrayRef createTupleUnpack(Value v); -// Targeting ../MagicMethod.java +/** Insert graph \p CALLEE into graph \p G using \p INPUTS as input values. + * The insertion happens at the current insertion point. + * Optionally, one can also pass \p VALUE_MAP to get a map between \p CALLEE + * values and their cloned copies in \p G. + */ +@Namespace("torch::jit") public static native @ByVal ValueVector insertGraph( + @ByRef Graph g, + @ByRef Graph callee, + @ByVal ValueArrayRef inputs); +@Namespace("torch::jit") public static native @ByVal ValueVector insertGraph( + @ByRef Graph g, + @ByRef Graph callee, + @ByVal ValueArrayRef inputs, + @ByRef ValueValueMap value_map); +/** Insert function \p CALLEE after node \p TO_REPLACE, remove the node and + * replace all its uses with corresponding outputs of the inserted function. + * This asserts that the number of outputs of the original node and the + * graph are the same. + */ +@Namespace("torch::jit") public static native @ByVal ValueVector inlineCallTo( + JitNode to_replace, + GraphFunction callee, + @Cast("bool") boolean use_graph/*=true*/); +@Namespace("torch::jit") public static native @ByVal ValueVector inlineCallTo( + JitNode to_replace, + GraphFunction callee); -// Targeting ../SpecialFormValue.java +@Namespace("torch::jit") public static native @ByVal ValueVector inlineCallTo( + JitNode to_replace, + GraphFunction callee, + Graph callee_graph); +/** If there is only one value in \p OUTPUTS and its kind is Tuple, insert a + * tuple unpack node and return the resulting values. 
+ */ +@Namespace("torch::jit") public static native @ByVal ValueVector unpackOutputs(@Const @ByRef ValueVector outputs); -// Targeting ../LegacyTensorConstructor.java +@Namespace("torch::jit") public static native @Cast("torch::jit::Node**") @StdVector PointerPointer findAllNodes(@ByRef Graph g, @ByVal Symbol kind, @Cast("bool") boolean recurse); +@Namespace("torch::jit") public static native @Cast("torch::jit::Node**") @StdVector PointerPointer findAllNodes(@ByRef Block b, @ByVal Symbol kind, @Cast("bool") boolean recurse); +@Namespace("torch::jit") public static native @Cast("torch::jit::Node**") @StdVector PointerPointer findAllNodes( + @ByVal BlockArrayRef a, + @ByVal Symbol kind, + @Cast("bool") boolean recurse); +// Targeting ../OperatorSet.java -// Targeting ../RangeValue.java + // namespace jit + // namespace torch -// Targeting ../IterableTree.java +// Parsed from torch/csrc/jit/python/update_graph_executor_opt.h +// #pragma once +// #include +@Namespace("torch::jit") public static native void setGraphExecutorOptimize(@Cast("bool") boolean o); +@Namespace("torch::jit") public static native @Cast("bool") boolean getGraphExecutorOptimize(); + // namespace jit + // namespace torch -@Namespace("torch::jit") public static native @ByVal ValueVector toValues( - @ByRef Graph g, - @ByVal NamedValueArrayRef nvs); -// Targeting ../SimpleSelf.java +// Parsed from torch/csrc/jit/runtime/argument_spec.h -// Targeting ../ExceptionMessageValue.java +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Targeting ../ExceptionValue.java +// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +// #endif +// Targeting ../ArgumentInfo.java -// Targeting ../SugaredEnumClass.java +// Targeting ../ArgumentSpec.java -// Targeting ../SliceValue.java +@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long ARG_SPEC_DEPTH_LIMIT(); +public static final long ARG_SPEC_DEPTH_LIMIT = ARG_SPEC_DEPTH_LIMIT(); +// Targeting ../ArgumentSpecCreator.java - // namespace jit - // namespace torch +// CompleteArgumentSpec represents one particular specialization. +// It is designed so that it can be created, hashed, and compared quickly +// since it is used along the hot-path of the JIT to check if the code +// we have created is valid for the given inputs. 
-// Parsed from torch/csrc/jit/frontend/resolver.h +// COmpleteArgumentInfoPOD is only used internally in CompleteArgumentSpec +// API users should use ArgumentInfo -// #pragma once +// public view of compressed CompleteArgumentInfo -// #include -// #include -// #include -// Targeting ../Resolver.java +@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef ArgumentInfo info); +@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef ArgumentSpec spec); -// Targeting ../NativeResolver.java +@Namespace("torch::jit") public static native @ByVal ByteOptional convertOptional( + @Const @ByRef ScalarTypeOptional from); -@Namespace("torch::jit") public static native @SharedPtr NativeResolver nativeResolver(); // namespace jit // namespace torch + // namespace std -// Parsed from torch/csrc/jit/frontend/tracer.h + +// Parsed from torch/csrc/jit/runtime/interpreter.h // #pragma once +// #include +// #include +// #include -// #include -// #include +// #include +// #include // #include -// #include -// #include -// #include // #include - // #include -// #include - -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../TracingState.java - - -// This is meant to be used as a thread local place, where we can store extra -// info that gets lost when we call into ATen from Python bindings. One example -// for when this happens is when we get an IntArrayRef argument with e.g. sizes -// for view. When tracing, those might be tensors, which let us encode extra -// data dependencies, but once they get to the ATen call where we actually have -// the tracing logic, they get converted into a raw IntArrayRef, and we loose -// all information. To prevent this, we temporarily stash it in here. -// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +// #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy-dtor") +// #endif -// Retrieve or set the current tracing state. Returns a nullptr if tracing is -// disabled. 
-@Namespace("torch::jit::tracer") public static native @SharedPtr TracingState getTracingState(); -@Namespace("torch::jit::tracer") public static native void setTracingState(@SharedPtr TracingState state); -@Namespace("torch::jit::tracer") public static native @Cast("bool") boolean isTracing(); -// Targeting ../warn_fn_type.java + // namespace at + // namespace c10 +// Targeting ../CodeImpl.java -@Namespace("torch::jit::tracer") public static native @Cast("const char*") BytePointer WARN_PYTHON_DATAFLOW(); public static native void WARN_PYTHON_DATAFLOW(BytePointer setter); -@Namespace("torch::jit::tracer") public static native @Cast("const char*") BytePointer WARN_CONSTRUCTOR(); public static native void WARN_CONSTRUCTOR(BytePointer setter); -@Namespace("torch::jit::tracer") public static native @Cast("const char*") BytePointer WARN_RESIZE(); public static native void WARN_RESIZE(BytePointer setter); -@Namespace("torch::jit::tracer") public static native @Cast("const char*") BytePointer STRICT_TRACER_MSG(); public static native void STRICT_TRACER_MSG(BytePointer setter); -@Namespace("torch::jit::tracer") public static native void _do_warn(@Cast("const char*") BytePointer _reason, @Cast("const char*") BytePointer _kind); -@Namespace("torch::jit::tracer") public static native void _do_warn(String _reason, String _kind); -@Namespace("torch::jit::tracer") public static native void warn(@Cast("const char*") BytePointer _reason, @Cast("const char*") BytePointer _kind/*=nullptr*/); -@Namespace("torch::jit::tracer") public static native void warn(@Cast("const char*") BytePointer _reason); -@Namespace("torch::jit::tracer") public static native void warn(String _reason, String _kind/*=nullptr*/); -@Namespace("torch::jit::tracer") public static native void warn(String _reason); -@Namespace("torch::jit::tracer") public static native void setWarn(@ByVal @Cast("torch::jit::tracer::warn_fn_type*") warn_fn_type fn); -// Targeting ../NoWarn.java -// Targeting ../WithNestedTracingFrame.java +// Targeting ../InterpreterStateImpl.java -@Namespace("torch::jit::tracer") public static native void recordSourceLocation(JitNode n); -// Targeting ../V_JitNode.java +// Targeting ../Instruction.java -@Namespace("torch::jit::tracer") public static native void setRecordSourceLocation(V_JitNode v); +// Targeting ../Code.java -@Namespace("torch::jit::tracer") public static native @ByVal StackEntryVector pythonCallstack(); -// Targeting ../StackEntryVector_V.java +// Targeting ../MobileCode.java -@Namespace("torch::jit::tracer") public static native void setPythonCallstack(StackEntryVector_V v); -// Having finished adding a new 'node' to the graph IR 'setValueTrace' -// associates this node with an output variable, so that further operations -// involving this variable know which node in the IR to reference. 
-@Namespace("torch::jit::tracer") public static native void setValueTrace(@Const @ByRef IValue v, Value value); -@Namespace("torch::jit::tracer") public static native void delValueTrace(@Const @ByRef IValue var); +// Created by wait() -@Namespace("torch::jit::tracer") public static native @ByVal @Cast("std::function*") Pointer pauseTracing(); +// InterpreterContinuation propagates dist_autograd_context_id +// through (and only through) the forward pass manually, other +// thread local settings are propagated with ThreadLocalState -@Namespace("torch::jit::tracer") public static native Value getValueTrace(@Const @ByRef IValue var); +// what is the tensors type, including state from the current execution context +// that modifies how the tensor behaves. For instance if no_grad is enabled +// this will cause the TensorType to have requires_grad=False. +@Namespace("torch::jit") public static native @SharedPtr("c10::TensorType") @ByVal TensorType tensorTypeInCurrentExecutionContext( + @Const @ByRef Tensor t); +// current (TLS) TorchScript interpreter callstack +@Namespace("torch::jit") public static native @ByVal StackEntryVector currentCallstack(); +@Namespace("torch::jit") public static native @ByVal StringVector currentModuleHierarchy(); + // namespace jit + // namespace torch -@Namespace("torch::jit::tracer") public static native void abandon(); -// NB: those serve both as an intermediate steps in addInputs below, -// as well as the overloads that terminate template recursion -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @Cast("int64_t") long value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @Cast("int64_t") long value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @ByVal SymInt value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @ByVal SymInt value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @ByVal LongOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @ByVal LongOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @Cast("bool") boolean value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @Cast("bool") boolean value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef BoolOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef BoolOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, double value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, double value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef DoubleOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef DoubleOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, 
@Const @ByRef Scalar value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @Const @ByRef Scalar value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef ScalarOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef ScalarOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @Const @ByRef Tensor value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @Const @ByRef Tensor value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef TensorOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef TensorOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @ByVal @Cast("c10::ArrayRef*") LongArrayRef value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @ByVal SymIntRef value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @ByVal SymIntRef value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @ByVal SymIntOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @ByVal SymIntOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef LongArrayRefOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef SymIntArrayRefOptional opt_value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef SymIntArrayRefOptional opt_value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @ByVal TensorArrayRef value, - @Cast("bool") boolean allow_undefined/*=false*/); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @ByVal TensorArrayRef value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @ByVal TensorArrayRef value, - @Cast("bool") boolean allow_undefined/*=false*/); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @ByVal TensorArrayRef value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Cast({"", "std::vector"}) @StdMove TensorVector value, - @Cast("bool") boolean allow_undefined/*=false*/); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Cast({"", "std::vector"}) @StdMove TensorVector value); 
-@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Cast({"", "std::vector"}) @StdMove TensorVector value, - @Cast("bool") boolean allow_undefined/*=false*/); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Cast({"", "std::vector"}) @StdMove TensorVector value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @ByVal @Cast("c10::ArrayRef >*") Pointer value, - @Const @SharedPtr @ByRef ClassType class_type); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @ByVal @Cast("c10::ArrayRef >*") Pointer value, - @Const @SharedPtr @ByRef ClassType class_type); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @ByVal DoubleArrayRef value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @ByVal DoubleArrayRef value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef DoubleArrayRefOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef DoubleArrayRefOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @ByVal @Cast("const c10::string_view*") Pointer value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @ByVal @Cast("const c10::string_view*") Pointer value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @ByVal Device value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @ByVal Device value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @ByVal Stream stream); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @ByVal Stream stream); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @ByVal Layout value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @ByVal Layout value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, ScalarType value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, ScalarType value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef ScalarTypeOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef ScalarTypeOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef DeviceOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef DeviceOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef LayoutOptional value); 
-@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef LayoutOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, @Cast("const char*") BytePointer name, @ByVal MemoryFormat value); -@Namespace("torch::jit::tracer") public static native void addInputs(JitNode n, String name, @ByVal MemoryFormat value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @ByVal DimnameListOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @ByVal DimnameListOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef MemoryFormatOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef MemoryFormatOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef GeneratorOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef GeneratorOptional value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - @Cast("const char*") BytePointer name, - @Const @ByRef BoolVector value); -@Namespace("torch::jit::tracer") public static native void addInputs( - JitNode n, - String name, - @Const @ByRef BoolVector value); +// Parsed from torch/csrc/jit/runtime/variable_tensor_list.h -@Namespace("torch::jit::tracer") public static native void ensureUniqueIfOutOfPlaced( - @Cast("const char*") BytePointer name, - @Const @ByRef Tensor tensor); -@Namespace("torch::jit::tracer") public static native void ensureUniqueIfOutOfPlaced( - String name, - @Const @ByRef Tensor tensor); -@Namespace("torch::jit::tracer") public static native void ensureUniqueIfOutOfPlaced( - @Cast("const char*") BytePointer name, - @Const @ByRef TensorOptional tensor); -@Namespace("torch::jit::tracer") public static native void ensureUniqueIfOutOfPlaced( - String name, - @Const @ByRef TensorOptional tensor); -@Namespace("torch::jit::tracer") public static native void addOutput(JitNode node, @Const @ByRef Tensor tensor); -@Namespace("torch::jit::tracer") public static native void setOutput(Value value, @Const @ByRef Tensor output); -@Namespace("torch::jit::tracer") public static native void addOutput(JitNode node, @Cast({"", "std::vector"}) @StdMove TensorVector list); -@Namespace("torch::jit::tracer") public static native void addOutput( - JitNode node, - @Cast("const c10::intrusive_ptr*") @ByRef Pointer output); - -@Namespace("torch::jit::tracer") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor getSizeOf( - @Cast("const torch::autograd::Variable*") @ByRef Tensor var, - @Cast("int64_t") long dim); +// #pragma once +// #include -@Namespace("torch::jit::tracer") public static native @ByVal @Cast("torch::autograd::Variable*") Tensor getNumelOf(@Cast("const torch::autograd::Variable*") @ByRef Tensor var); +// a wrapper to mark places where we expect all the at::Tensors to be +// variables - // namespace tracer // namespace jit // namespace torch -// Parsed from torch/csrc/jit/frontend/lexer.h +// Parsed from torch/csrc/jit/runtime/graph_executor.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// 
#include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") -// #endif +// #include +// #include -// single character tokens are just the character itself '+' -// multi-character tokens need an entry here -// if the third entry is not the empty string, it is used -// in the lexer to match this token. +// #include +// #include +// #include +// #include +// #include -// These kinds are also used in Tree.h as the kind of the AST node. -// Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the -// lexer. -// #define TC_FORALL_TOKEN_KINDS(_) -// _(TK_EOF, "eof", "") -// _(TK_WHITESPACE, "whitespace", "") -// _(TK_WHITESPACE_EOF, "whitespace_eof", "") -// _(TK_NUMBER, "number", "") -// _(TK_NEWLINE, "newline", "") -// _(TK_INDENT, "indent", "") -// _(TK_DEDENT, "dedent", "") -// _(TK_DEF, "def", "def") -// _(TK_EQUIVALENT, "equivalent", "<=>") -// _(TK_IDENT, "ident", "") -// _(TK_STRING, "string", "") -// _(TK_STRINGLITERAL, "string_literal", "") -// _(TK_CONST, "const", "") -// _(TK_LIST, "list", "") -// _(TK_DICT, "dict", "") -// _(TK_OPTION, "option", "") -// _(TK_APPLY, "apply", "") -// _(TK_COMPREHENSION, "comprehension", "") -// _(TK_RANGE_CONSTRAINT, "range_constraint", "") -// _(TK_PARAM, "param", "") -// _(TK_INFERRED, "inferred", "") -// _(TK_ACCESS, "access", "") -// _(TK_ASSIGN, "assign", "") -// _(TK_AUG_ASSIGN, "aug_assign", "") -// _(TK_ATTRIBUTE, "attribute", "") -// _(TK_IF, "if", "if") -// _(TK_ELSE, "else", "else") -// _(TK_ELIF, "elif", "elif") -// _(TK_WHILE, "while", "while") -// _(TK_EXPR_STMT, "expression statement", "") -// _(TK_RETURN, "return", "return") -// _(TK_IS, "is", "is") -// _(TK_ISNOT, "is not", "is not") -// _(TK_NE, "ne", "!=") -// _(TK_EQ, "eq", "==") -// _(TK_LE, "le", "<=") -// _(TK_GE, "ge", ">=") -// _(TK_FLOOR_DIV, "floordiv", "//") -// _(TK_IF_EXPR, "if", "") -// _(TK_TRUE, "True", "True") -// _(TK_FALSE, "False", "False") -// _(TK_NONE, "None", "None") -// _(TK_AND, "and", "and") -// _(TK_OR, "or", "or") -// _(TK_NOT, "not", "not") -// _(TK_LSHIFT, "<<", "<<") -// _(TK_RSHIFT, ">>", ">>") -// _(TK_CAST, "cast", "") -// _(TK_PLUS_EQ, "+=", "+=") -// _(TK_MINUS_EQ, "-=", "-=") -// _(TK_TIMES_EQ, "*=", "*=") -// _(TK_DIV_EQ, "/=", "/=") -// _(TK_MOD_EQ, "%=", "%=") -// _(TK_BIT_OR_EQ, "|=", "|=") -// _(TK_BIT_AND_EQ, "&=", "&=") -// _(TK_BIT_XOR_EQ, "^=", "^=") -// _(TK_LSHIFT_EQ, "<<=", "<<=") -// _(TK_RSHIFT_EQ, ">>=", ">>=") -// _(TK_POW_EQ, "**=", "**=") -// _(TK_GLOBAL, "global", "global") -// _(TK_BUILT_IN, "built-in", "") -// _(TK_SUBSCRIPT, "subscript", "") -// _(TK_VAR, "variable", "") -// _(TK_NOTHING, "nothing", "") -// _(TK_DICT_LITERAL, "dict-literal", "") -// _(TK_LIST_LITERAL, "list-literal", "") -// _(TK_TUPLE_LITERAL, "tuple-literal", "") -// _(TK_FOR, "for", "for") -// _(TK_IN, "in", "in") -// _(TK_NOTIN, "not in", "not in") -// _(TK_STARRED, "starred", "") -// _(TK_UNARY_MINUS, "unary minus", "") -// _(TK_POW, "pow operator", "**") -// _(TK_ARROW, "arrow", "->") -// _(TK_DECL, "decl", "") -// _(TK_SLICE_EXPR, "slice expr", "") -// _(TK_TYPE_COMMENT, "type comment", "# type:") -// _(TK_RAISE, "raise", "raise") -// _(TK_ASSERT, "assert", "assert") -// _(TK_DOTS, "dots", "...") -// _(TK_LIST_COMP, "list comprehension", "") -// _(TK_DICT_COMP, "dict comprehension", "") -// _(TK_BREAK, "break", "break") -// _(TK_CONTINUE, "continue", "continue") -// _(TK_DELETE, "del", "del") -// _(TK_PASS, "pass", "pass") -// 
_(TK_CLASS_DEF, "class", "class") -// _(TK_IMPORT, "import", "import") -// _(TK_WITH, "with", "with") -// _(TK_WITH_ITEM, "withitem", "") -// _(TK_AS, "as", "as") -// _(TK_PROP, "property", "") -// _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") -// _(TK_NONE_TYPE, "NoneType", "NoneType") -@Namespace("torch::jit") public enum TokenKind { - // we use characters to represent themselves so skip all valid characters - // before - // assigning enum values to multi-char tokens. - TK_DUMMY_START(256), - TK_EOF(257), - TK_WHITESPACE(258), - TK_WHITESPACE_EOF(259), - TK_NUMBER(260), - TK_NEWLINE(261), - TK_INDENT(262), - TK_DEDENT(263), - TK_DEF(264), - TK_EQUIVALENT(265), - TK_IDENT(266), - TK_STRING(267), - TK_STRINGLITERAL(268), - TK_CONST(269), - TK_LIST(270), - TK_DICT(271), - TK_OPTION(272), - TK_APPLY(273), - TK_COMPREHENSION(274), - TK_RANGE_CONSTRAINT(275), - TK_PARAM(276), - TK_INFERRED(277), - TK_ACCESS(278), - TK_ASSIGN(279), - TK_AUG_ASSIGN(280), - TK_ATTRIBUTE(281), - TK_IF(282), - TK_ELSE(283), - TK_ELIF(284), - TK_WHILE(285), - TK_EXPR_STMT(286), - TK_RETURN(287), - TK_IS(288), - TK_ISNOT(289), - TK_NE(290), - TK_EQ(291), - TK_LE(292), - TK_GE(293), - TK_FLOOR_DIV(294), - TK_IF_EXPR(295), - TK_TRUE(296), - TK_FALSE(297), - TK_NONE(298), - TK_AND(299), - TK_OR(300), - TK_NOT(301), - TK_LSHIFT(302), - TK_RSHIFT(303), - TK_CAST(304), - TK_PLUS_EQ(305), - TK_MINUS_EQ(306), - TK_TIMES_EQ(307), - TK_DIV_EQ(308), - TK_MOD_EQ(309), - TK_BIT_OR_EQ(310), - TK_BIT_AND_EQ(311), - TK_BIT_XOR_EQ(312), - TK_LSHIFT_EQ(313), - TK_RSHIFT_EQ(314), - TK_POW_EQ(315), - TK_GLOBAL(316), - TK_BUILT_IN(317), - TK_SUBSCRIPT(318), - TK_VAR(319), - TK_NOTHING(320), - TK_DICT_LITERAL(321), - TK_LIST_LITERAL(322), - TK_TUPLE_LITERAL(323), - TK_FOR(324), - TK_IN(325), - TK_NOTIN(326), - TK_STARRED(327), - TK_UNARY_MINUS(328), - TK_POW(329), - TK_ARROW(330), - TK_DECL(331), - TK_SLICE_EXPR(332), - TK_TYPE_COMMENT(333), - TK_RAISE(334), - TK_ASSERT(335), - TK_DOTS(336), - TK_LIST_COMP(337), - TK_DICT_COMP(338), - TK_BREAK(339), - TK_CONTINUE(340), - TK_DELETE(341), - TK_PASS(342), - TK_CLASS_DEF(343), - TK_IMPORT(344), - TK_WITH(345), - TK_WITH_ITEM(346), - TK_AS(347), - TK_PROP(348), - TK_ELLIPSIS(349), - TK_NONE_TYPE(350); +@Namespace("torch::jit") public enum ExecutorExecutionMode { + SIMPLE(0), + PROFILING(1); public final int value; - private TokenKind(int v) { this.value = v; } - private TokenKind(TokenKind e) { this.value = e.value; } - public TokenKind intern() { for (TokenKind e : values()) if (e.value == value) return e; return this; } + private ExecutorExecutionMode(int v) { this.value = v; } + private ExecutorExecutionMode(ExecutorExecutionMode e) { this.value = e.value; } + public ExecutorExecutionMode intern() { for (ExecutorExecutionMode e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } +// Targeting ../ExecutionPlan.java -@Namespace("torch::jit") public static native @StdString BytePointer kindToString(int kind); -@Namespace("torch::jit") public static native int stringToKind(@StdString BytePointer str); -@Namespace("torch::jit") public static native int stringToKind(@StdString String str); -// nested hash tables that indicate char-by-char what is a valid token. 
-// Targeting ../TokenTrie.java +// Targeting ../GraphExecutorState.java + + +// Targeting ../EnableProfilingGuard.java + + +// Targeting ../GraphExecutorImplBase.java + +// Targeting ../GraphExecutor.java -// Targeting ../SharedParserData.java +@Namespace("torch::jit") public static native JitNode replaceBlockWithFallbackGraph( + Block b, + @ByVal ValueArrayRef inputs); -@Namespace("torch::jit") public static native @ByRef SharedParserData sharedParserData(); -// Targeting ../Token.java +// These passes need to run before it is valid to pass to the interpreter +// regardless of whether sizes have been specialized or not. +@Namespace("torch::jit") public static native void runRequiredPasses(@Const @SharedPtr("torch::jit::Graph") @ByRef Graph g); +@Namespace("torch::jit") public static native void debugSetFusionGroupInlining(@Cast("bool") boolean state); +@Namespace("torch::jit") public static native @Cast("bool") boolean getFusionGroupInlining(); -// Targeting ../Lexer.java +@Namespace("torch::jit") public static native void debugSetAutodiffSubgraphInlining(@Cast("bool") boolean state); +@Namespace("torch::jit") public static native @SharedPtr("torch::jit::Graph") @ByVal Graph lastExecutedOptimizedGraph(); +@Namespace("torch::jit") public static native @Cast("std::atomic*") @ByRef BoolPointer getProfilingMode(); +@Namespace("torch::jit") public static native @Cast("std::atomic*") @ByRef BoolPointer getExecutorMode(); +@Namespace("torch::jit") public static native @Cast("std::atomic*") @ByRef LongPointer getNumProfiledRuns(); +@Namespace("torch::jit") public static native @Cast("size_t") long getBailoutDepth(); +@Namespace("torch::jit") public static native @Cast("bool") boolean IsNewExecutorEnabled(); +// Targeting ../GraphOptimizerEnabledGuard.java - // namespace jit - // namespace torch -// Parsed from torch/csrc/jit/frontend/strtod.h -// #pragma once -// #include -@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") PointerPointer endptr); -@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); -@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); -@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr byte[] endptr); -@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); -@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); -@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr byte[] endptr); -@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") PointerPointer endptr); -@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); -@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); -@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr byte[] endptr); -@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); -@Namespace("torch::jit") public 
static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); -@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr byte[] endptr); +// for debugging information we expose a way to get the last actually +// run graph. Previous approaches allowed querying the GraphExecutor +// for what graph it would run in certain circumstances (graphFor), but +// this is fragile because we sometimes change how these decisions are made. +// This interface still allows our tests to look at optimized graphs, but +// with less plumbing. + // namespace detail // namespace jit // namespace torch -// Parsed from torch/csrc/jit/frontend/tree.h +// Parsed from torch/csrc/jit/api/function_impl.h // #pragma once -// #include -// #include -// #include -// #include - -// #include -// #include -// #include - -// Trees are used to represent all forms of TC IR, pre- and post-typechecking. -// Rather than have a full class hierarchy for all TC statements, trees are a -// slight variation of Lisp s-expressions. For instance, the expression a*b+1 -// is represented as: -// (+ (* (ident a) (ident b)) (const 1)) -// Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which -// define stringValue(). Everything else is a Compound object, which has a -// 'kind' that is a token from lexer.h's TokenKind enum. Single-character -// operators like '+' are represented using the character itself (so, add.kind() -// would be '+'). Each Compound object also contains a list of subtrees and is -// associated with a SourceRange for error reporting. -// Memory management of trees is done using intrusive_ptr. -// Targeting ../Tree.java - +// #include +// #include +// #include +// #include +// Targeting ../GraphFunction.java -// Targeting ../JitString.java +// Short hands for dynamic_cast. +@Namespace("torch::jit") public static native @NoException(true) GraphFunction tryToGraphFunction(@ByRef Function arg0); +@Namespace("torch::jit") public static native @ByRef GraphFunction toGraphFunction(@ByRef Function arg0); -@Namespace("torch::jit") public static native @ByVal SourceRange mergeRanges(@ByVal SourceRange c, @Cast("const torch::jit::TreeList*") @ByRef Pointer others); -// Targeting ../Compound.java + // namespace jit + // namespace torch -// Targeting ../pretty_tree.java +// Parsed from torch/csrc/jit/api/method.h +// #pragma once +// #include +// #include +// #include +// #include +// #include +// Targeting ../Method.java -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @ByVal pretty_tree t_); -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Cast("const torch::jit::TreeRef*") @ByRef Pointer t); +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. + // namespace script // namespace jit // namespace torch -// Parsed from torch/csrc/jit/frontend/error_report.h +// Parsed from torch/csrc/jit/api/object.h // #pragma once +// #include +// #include // #include -// #include -// Targeting ../Call.java - +// #include -// Targeting ../ErrorReport.java +// #include +// Throw this in C++ land if `attr` fails. 
This will be converted to a Python +// AttributeError by the Python binding code. +// Targeting ../JitObject.java +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. + // namespace script // namespace jit // namespace torch -// Parsed from torch/csrc/jit/frontend/tree_views.h +// Parsed from torch/csrc/api/include/torch/ordered_dict.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include +// #include // #include +// #include // #include -// Targeting ../TreeView.java +// #include +// Targeting ../StringTensorDict.java -// Targeting ../DefMaybe.java +// Targeting ../StringModuleDict.java -// Targeting ../ExprMaybe.java +// Targeting ../StringAnyModuleDict.java -// Targeting ../VarMaybe.java +// Targeting ../StringSharedModuleDict.java -// Targeting ../Ident.java +// Targeting ../StringTensorDictItem.java -// Targeting ../Stmt.java +// Targeting ../StringModuleDictItem.java -// Targeting ../Expr.java +// Targeting ../StringAnyModuleDictItem.java -// Targeting ../Attribute.java +// Targeting ../StringSharedModuleDictItem.java -// Targeting ../Param.java +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Targeting ../Decl.java -// Targeting ../Def.java -// Targeting ../Property.java -// Targeting ../ClassDef.java -// Targeting ../If.java -// Targeting ../While.java -// Targeting ../For.java -// Targeting ../ListComp.java -// Targeting ../DictComp.java -// Targeting ../Global.java -// Targeting ../AugAssignKind.java -// Targeting ../AugAssign.java -// Targeting ../Assign.java -// Targeting ../Return.java -// Targeting ../Raise.java -// Targeting ../Assert.java -// Targeting ../Pass.java -// Targeting ../Dots.java -// Targeting ../Break.java -// Targeting ../Continue.java -// Targeting ../ExprStmt.java -// Targeting ../BinOp.java -// Targeting ../UnaryOp.java -// Targeting ../ConstExpr.java -// Targeting ../StringLiteral.java -// Targeting ../Apply.java -// Targeting ../Select.java -// Targeting ../SliceExpr.java -// Targeting ../Subscript.java -// Targeting ../Var.java -// Targeting ../WithItem.java -// Targeting ../With.java -// Targeting ../TernaryIf.java + // namespace torch -// Targeting ../ListLiteral.java +// Parsed from torch/csrc/jit/frontend/name_mangler.h +// #pragma once -// Targeting ../TupleLiteral.java +// #include +// #include +// Targeting ../NameMangler.java -// Targeting ../DictLiteral.java + // namespace jit + // namespace torch -// Targeting ../Starred.java +// Parsed from torch/csrc/jit/api/compilation_unit.h + +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include + +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../Self.java -// Targeting ../Delete.java + +// Targeting ../CompilationUnit.java +// An owning pointer to a Function. Just a pair of a raw Function ptr and its +// owning CU. We need this because pybind requires a ref-counted way to refer to +// Functions. +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias.
+ // namespace script // namespace jit // namespace torch - // namespace std - -// Parsed from torch/csrc/jit/ir/attributes.h +// Parsed from torch/csrc/jit/api/module.h // #pragma once -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// #include // #include +// #include +// #include +// #include // #include -// #include -// #include +// This file contains classes which assist in desugaring Python style +// modules and their methods into flattened graphs which don't have any +// function calls. +// Map which stores filename to content. +// Targeting ../NamedJitModule.java -// #include -@Namespace("torch::jit") @MemberGetter public static native int max_tensor_display_size(); +// Targeting ../NamedTensor.java -@Name("torch::jit::AttributeKind") public enum JitAttributeKind { - f(0), - fs(1), - c(2), - cs(3), - i(4), - is(5), - s(6), - ss(7), - t(8), - ts(9), - g(10), - gs(11), - ty(12), - tys(13), - ival(14); + +// Targeting ../NamedIValue.java + + + // namespace detail +// Targeting ../JitModule.java + + + +// C++ equivalent api of `torch.jit.freeze`. See documentation there for +// details. +@Namespace("torch::jit") public static native @ByVal JitModule freeze( + @Const @ByRef JitModule module, + @ByVal(nullValue = "c10::optional >(c10::nullopt)") StringVectorOptional preserved_attrs, + @Cast("bool") boolean optimize_numerics/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule freeze( + @Const @ByRef JitModule module); + +// C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation +// there for details. +@Namespace("torch::jit") public static native @ByVal JitModule optimize_for_inference( + @ByRef JitModule module, + @Const @ByRef(nullValue = "std::vector{}") StringVector other_methods); +@Namespace("torch::jit") public static native @ByVal JitModule optimize_for_inference( + @ByRef JitModule module); + +@Namespace("torch::jit") public enum FusionBehavior { STATIC(0), DYNAMIC(1); public final int value; - private JitAttributeKind(int v) { this.value = v; } - private JitAttributeKind(JitAttributeKind e) { this.value = e.value; } - public JitAttributeKind intern() { for (JitAttributeKind e : values()) if (e.value == value) return e; return this; } + private FusionBehavior(int v) { this.value = v; } + private FusionBehavior(FusionBehavior e) { this.value = e.value; } + public FusionBehavior intern() { for (FusionBehavior e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } -@Namespace("torch::jit") public static native @Cast("const char*") BytePointer toString(JitAttributeKind kind); -// Targeting ../AttributeValue.java +// clang-format off +/* +Sets the type and number of specializations that can occur during fusion. +Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC +and depth is an integer. -// Targeting ../GraphAttr.java +Behavior - static vs dynamic: + In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined + based on some initial profiling runs. + In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple + shapes are possible. +In both cases, we also recompile on new striding behavior, device, or dtype. 
-// Targeting ../GraphsAttr.java +Behavior - fallback functions & depth: + When an input doesn't match the format required by the specialized compiled op, it will run + a fallback function. Fallback functions are recursively compiled and specialized based + on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to + limit the number of specializations that can be compiled, before giving up on recompiling and + falling back to a completely un-fused, un-specialized implementation. +The list of (type, depth) pairs controls the type of specializations and the number of +specializations. For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first +two specializations will use static fusions, the following two specializations will use +dynamic fusion, and any inputs that satisfy none of the 4 options will run an +unfused implementation. -// Targeting ../IRAttributeError.java +NB: in the future, as more and more fusion backends are added there may be more granular +APIs for specific fusers. +*/ +// clang-format on +@Namespace("torch::jit") public static native @ByVal FusionStrategy getFusionStrategy(); +// returns previous strategy +@Namespace("torch::jit") public static native @ByVal FusionStrategy setFusionStrategy(@ByRef FusionStrategy fusion_strategy); +// Targeting ../SlotCursor.java - // namespace jit - // namespace torch -// Parsed from torch/csrc/jit/ir/constants.h +// Targeting ../module_iterator.java -// #pragma once -// #include -// #include -// #include -// #include -// #include -// helpers for handling constants in the IR -// - create constant nodes from ints, floats, complex, intlist, Tensors, and -// other types -// - implement primitive constant ops. +// Targeting ../named_module_iterator.java -// thrown when insertConstant cannot encode the IValue into a graph -@Namespace("torch::jit") public static native Value insertConstant( - @ByRef Graph g, - @Const @ByRef IValue val, - @ByVal(nullValue = "c10::optional(c10::nullopt)") SourceRangeOptional loc, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") ScopeOptional scope); -@Namespace("torch::jit") public static native Value insertConstant( - @ByRef Graph g, - @Const @ByRef IValue val); +// Targeting ../parameter_iterator.java -// note: prefer g.insertConsant(val, loc) which does exactly the same thing -// this function is only declared/defined here because its implementation is -// closely related to the implementation of prim::Constant that is also in -// constants.cpp. -// -// returns a c10::nullopt if the IValue kind cannot be inserted as a constant -@Namespace("torch::jit") public static native @ByVal ValueOptional tryInsertConstant( - @ByRef Graph g, - @Const @ByRef IValue val, - @ByVal(nullValue = "c10::optional(c10::nullopt)") SourceRangeOptional loc, - @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") ScopeOptional scope); -@Namespace("torch::jit") public static native @ByVal ValueOptional tryInsertConstant( - @ByRef Graph g, - @Const @ByRef IValue val); -//////////////////////////////////////////////////////////////////////////////// -// Helper for retrieving constants -//////////////////////////////////////////////////////////////////////////////// +// Targeting ../named_parameter_iterator.java -// attempt to convert a (possibly constant) Value* into an interpreter value -// (IValue).
returns c10::nullopt if the Value* was not constant -@Namespace("torch::jit") public static native @ByVal IValueOptional toIValue(@Const Value v); -// if a value is a constant then try to turn into type T using the -// same rules as the interpreter +// Targeting ../attribute_iterator.java + + +// Targeting ../named_attribute_iterator.java + + +// Targeting ../buffer_iterator.java + + +// Targeting ../named_buffer_iterator.java + + +// Targeting ../module_list.java + + +// Targeting ../named_module_list.java + + +// Targeting ../parameter_list.java + + +// Targeting ../named_parameter_list.java + + +// Targeting ../attribute_list.java + + +// Targeting ../named_attribute_list.java + + +// Targeting ../buffer_list.java + + +// Targeting ../named_buffer_list.java + + +// Targeting ../ModulePolicy.java + + +// Targeting ../ParameterPolicy.java + + +// Targeting ../BufferPolicy.java + + +// Targeting ../AttributePolicy.java + + +// Targeting ../NamedJitModulePolicy.java + + +// Targeting ../NamedTensorPolicy.java + + +// Targeting ../NamedIValuePolicy.java + + + + // namespace detail + +@Namespace("torch::jit") public static native @Cast("bool*") @ByRef BoolPointer getInlineEverythingMode(); +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. + // namespace script + // namespace jit // namespace torch -// Parsed from torch/csrc/jit/ir/graph_node_list.h +// Parsed from torch/serialize/input-archive.h // #pragma once -// #include +// #include +// #include +// #include +// #include +// #include -// Intrusive doubly linked lists with sane reverse iterators. -// The header file is named generic_graph_node_list.h because it is ONLY -// used for Graph's Node lists, and if you want to use it for other -// things, you will have to do some refactoring. -// -// At the moment, the templated type T must support a few operations: -// -// - It must have a field: T* next_in_graph[2] = { nullptr, nullptr }; -// which are used for the intrusive linked list pointers. -// -// - It must have a method 'destroy()', which removes T from the -// list and frees a T. -// -// In practice, we are only using it with Node and const Node. 'destroy()' -// needs to be renegotiated if you want to use this somewhere else. -// -// Regardless of the iteration direction, iterators always physically point -// to the element they logically point to, rather than -// the off-by-one behavior for all standard library reverse iterators like -// std::list. +// #include +// #include +// #include +// #include + // namespace at + // namespace jit + // namespace torch +// Targeting ../InputArchive.java -// The list is includes two sentinel nodes, one at the beginning and one at the -// end with a circular link between them. It is an error to insert nodes after -// the end sentinel node but before the beginning node: -// Visualization showing only the next() links: -// HEAD -> first -> second -> ... -> last -> TAIL -// ^------------------------------------------ + // namespace serialize + // namespace torch -// Visualization showing only the prev() links: -// HEAD <- first <- second <- ... 
<- last <- TAIL -// ------------------------------------------^ -@Namespace("torch::jit") @MemberGetter public static native int kNextDirection(); -public static final int kNextDirection = kNextDirection(); -@Namespace("torch::jit") @MemberGetter public static native int kPrevDirection(); -public static final int kPrevDirection = kPrevDirection(); -// Targeting ../graph_node_list_iterator.java +// Parsed from torch/serialize/output-archive.h + +// #pragma once + +// #include +// #include + +// #include +// #include +// #include +// #include + // namespace at + // namespace jit + +// Targeting ../OutputArchive.java -// Targeting ../graph_node_list.java + // namespace serialize + // namespace torch +// Parsed from torch/serialize/archive.h - // namespace jit - // namespace torch +// #pragma once - // namespace std +// #include +// #include -// Parsed from torch/csrc/jit/ir/named_value.h +// Parsed from torch/data/samplers/serialize.h // #pragma once -// #include -// #include -// #include -// #include -// Targeting ../NamedValue.java - +// #include +// #include +/** Serializes a {@code Sampler} into an {@code OutputArchive}. */ - // namespace jit +/** Deserializes a {@code Sampler} from an {@code InputArchive}. */ + // namespace samplers + // namespace data // namespace torch -// Parsed from torch/csrc/jit/ir/scope.h +// Parsed from torch/data/samplers/stream.h // #pragma once -// #include -// #include -// #include -// #include + // #include -// #include -// #include -@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kModuleInstanceInfo(); +// #include +// #include +// #include - // namespace utils +// #include + // namespace serialize + // namespace torch +// Targeting ../BatchSize.java -// Scope is a node of a trie that represents the tree of nested scopes. -// Individual scopes are pushed and popped from Graph, which holds a -// pointer to the current scope. Each Node in Graph holds a pointer -// to the scope that was current when the node was created. -// The trie never needs to shrink, it only grows until it is disposed -// of when Graph is deallocated. Hence, pointers to scopes held by nodes -// will always be valid as long as Graph is alive. -// Targeting ../Scope.java +// Targeting ../StreamSampler.java -// Targeting ../ModuleInstanceInfo.java + // namespace samplers + // namespace data + // namespace torch -/** - * InlinedCallStack is an element in a list representing callstack of functions - * that have been inlined. - * - * Each such element holds info about the current callsite (Function and - * SourceRange) and a pointer to the next element in the list. The last element - * in the list represents the innermost function that was inlined. - * - * For instance, if a node has a callstack - * [foo, source_range1] -> [bar, source_range2] - * it means that this node was originally from function 'bar' that was called - * at 'source_range2' in function 'foo' that was called in the current function - * at 'source_range1'. - * - * If a node did not come from any inlined function, its callstack will be - * empty. - * - * The callstack lists only grow, we never remove elements from them, which - * allows us to reuse same elements in different lists. 
For instance, if we - * inline function 'bar' to 'foo' and then inline 'foo' to two functions 'ham' - * and 'baz', the callstacks would look like: - * - * [baz, source_range3] -- - * \ - * --> [foo, source_range1] -> [bar, source_range2] - * / - * [ham, source_range4] -- - */ -// Targeting ../InlinedCallStack.java +// Parsed from torch/data/samplers.h +// #pragma once -// {source range, node name, InlinedCallStack} -// We store node name because same debug infor will be used for -// profiling as well, so we need to know op names as well. -@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kDebugInfoTupleSourceRangeIndex(); -@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kDebugInfoTupleNodeNameIndex(); -@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kDebugInfoTupleInlinedCSIndex(); - // namespace jit - // namespace torch +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Parsed from torch/csrc/jit/ir/ir.h +// Parsed from torch/serialize/tensor.h // #pragma once -// #include -// #include -// #include -// #include -// #include - -// #include -// #include -// #include +// #include +// #include +@Namespace("torch") public static native @ByRef @Name("operator <<") OutputArchive shiftLeft( + @ByRef OutputArchive archive, + @Const @ByRef Tensor tensor); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("torch") public static native @ByRef @Name("operator >>") InputArchive shiftRight( + @ByRef InputArchive archive, + @ByRef Tensor tensor); + // namespace torch -// #include -// #include -// #include -// #include -// Forward declare, the real meat is in python_ir.cpp -@Namespace("torch::jit::utils") public static native @StdString BytePointer getNodesModuleHierarchy(@Const @ByRef JitNode n); +// Parsed from torch/serialize.h -// Targeting ../AliasDb.java +// #pragma once +// #include +// #include +// #include +// #include +// #include -// #define C10_USING(T) using ::c10::T; -// #undef C10_USING +/** Serializes the given {@code value}. + * There must be an overload of {@code operator<<} between {@code serialize::OutputArchive} + * and {@code Value} for this method to be well-formed. Currently, such an overload + * is provided for (subclasses of): + * + * - {@code torch::nn::Module}, + * - {@code torch::optim::Optimizer} + * - {@code torch::Tensor} + * + * To perform the serialization, a {@code serialize::OutputArchive} is constructed, + * and all arguments after the {@code value} are forwarded to its {@code save_to} method. + * For example, you can pass a filename, or an {@code ostream}. + * + * \rst + * .. code-block:: cpp + * + * torch::nn::Linear model(3, 4); + * torch::save(model, "model.pt"); + * + * torch::optim::SGD sgd(/*lr=* /0.9); + * std::ostringstream stream; + * // Note that the same stream cannot be used in multiple torch::save(...) + * // invocations, otherwise the header will be corrupted. + * torch::save(sgd, stream); + * + * auto tensor = torch::ones({3, 4}); + * torch::save(tensor, "my_tensor.pt"); + * \endrst */ -// #define C10_USING(T) using ::c10::T##Ptr; -// #undef C10_USING +/** Serializes the given {@code tensor_vec} of type {@code std::vector}. 
+ * + * To perform the serialization, a {@code serialize::OutputArchive} is constructed, + * and all arguments after the {@code tensor_vec} are forwarded to its {@code save_to} + * method. For example, you can pass a filename, or an {@code ostream}. + * + * \rst + * .. code-block:: cpp + * + * std::vector tensor_vec = { torch::randn({1, 2}), + * torch::randn({3, 4}) }; torch::save(tensor_vec, "my_tensor_vec.pt"); + * + * std::vector tensor_vec = { torch::randn({5, 6}), + * torch::randn({7, 8}) }; std::ostringstream stream; + * // Note that the same stream cannot be used in multiple torch::save(...) + * // invocations, otherwise the header will be corrupted. + * torch::save(tensor_vec, stream); + * \endrst */ +@Namespace("torch") public static native @Cast("char*") @StdVector BytePointer pickle_save(@Const @ByRef IValue ivalue); +/// +/// +/// +/// +/// +/// +@Namespace("torch") public static native @ByVal IValue pickle_load(@Cast("char*") @StdVector BytePointer data); +@Namespace("torch") public static native @ByVal IValue pickle_load(@Cast("char*") @StdVector ByteBuffer data); +@Namespace("torch") public static native @ByVal IValue pickle_load(@Cast("char*") @StdVector byte[] data); -// #if !defined(USE_ROCM) -// #endif +/** Deserializes the given {@code value}. + * There must be an overload of {@code operator>>} between {@code serialize::InputArchive} + * and {@code Value} for this method to be well-formed. Currently, such an overload + * is provided for (subclasses of): + * + * - {@code torch::nn::Module}, + * - {@code torch::optim::Optimizer} + * - {@code torch::Tensor} + * + * To perform the serialization, a {@code serialize::InputArchive} is constructed, + * and all arguments after the {@code value} are forwarded to its {@code load_from} method. + * For example, you can pass a filename, or an {@code istream}. + * + * \rst + * .. code-block:: cpp + * + * torch::nn::Linear model(3, 4); + * torch::load(model, "model.pt"); + * + * torch::optim::SGD sgd(/*lr=* /0.9); + * std::istringstream stream("..."); + * torch::load(sgd, stream); + * + * auto tensor = torch::ones({3, 4}); + * torch::load(tensor, "my_tensor.pt"); + * \endrst */ -// Targeting ../MatchedSchema.java +/** Deserializes the given {@code tensor_vec} of type {@code std::vector}. + * + * To perform the serialization, a {@code serialize::InputArchive} is constructed, + * and all arguments after the {@code value} are forwarded to its {@code load_from} method. + * For example, you can pass a filename, or an {@code istream}. + * + * \rst + * .. code-block:: cpp + * + * std::vector tensor_vec; + * torch::load(tensor_vec, "my_tensor_vec.pt"); + * + * std::vector tensor_vec; + * std::istringstream stream("..."); + * torch::load(tensor_vec, stream); + * \endrst */ + // namespace torch +// Parsed from torch/data/datasets/chunk.h -// A Graph represents one "function" of computation. -// It uses a simple ownership model where the graph owns all the nodes inside -// it. All references inside the graph are raw pointers. Destroying the Graph -// will invalidate any pointers to nodes in the graph. +// #pragma once -// Node is the base class of the IR graph. It represents one computation -// and dependencies on a list of Values. The "prim-ops", so to speak. +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// A Value represents an input or output to node that is either a -// Tensor or an opaque Handle object, as determined by type(). 
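A minimal round-trip sketch for the pickle_save/pickle_load bindings above, assuming
the org.bytedeco.pytorch.global.torch entry point generated by these presets and the
generated IValue(Tensor) constructor and toTensor() accessor; adjust the names if the
actual generated API differs.

  import org.bytedeco.javacpp.BytePointer;
  import org.bytedeco.pytorch.*;
  import static org.bytedeco.pytorch.global.torch.*;

  Tensor tensor = ones(3, 4);                           // assumed ones(long...) factory binding
  BytePointer bytes = pickle_save(new IValue(tensor));  // std::vector<char> mapped to BytePointer
  Tensor restored = pickle_load(bytes).toTensor();      // decode the pickle back into a Tensor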
+// #include +// Targeting ../ChunkDataReader.java -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef Graph g); -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef JitNode n); -// A list of nodes, with inputs and outputs -// Targeting ../Use.java +/** BatchDataBuffer manages a queue of UnwrappedBatchData. After a new chunk is + * loaded, BatchDataBuffer splits it into small batches and push them into the + * queue. When get_batch is called from data loader, it pops cached batches and + * return. If the cache is empty, it either waits to load more chunks or return + * null if all chunks are loaded. */ +// Targeting ../ChunkDatasetOptions.java -// Note [User node does not uniquely identify use] -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// A while back, we wrote some code manipulating uses that looked like this: -// -// for (auto& use : used_val->uses_) { -// if (use.user == this_node) { -// use.offset += 1; -// break; -// } -// } -// -// This code is trying to find a particular use (our node's use) to update it. -// However, it's wrong: there may be *multiple* uses of a value %x in a node, -// as might be the case in this IR: -// -// %y = Add %x %x -// -// In this case, there are two uses of %x whose user is the node 'Add %x %x'. -// So, "use induced by this node" is not a well-formed concept. -// -// If you are looking for "use induced by an input", it's best to use -// findUseForInput() to get it. +// Targeting ../ChunkDataset.java -// the list types are intentionally simple, but we type-def -// them here so if we need to change them, refactoring will be easier -// Targeting ../BlockWrap.java + // namespace datasets + // namespace data + // namespace torch -// Targeting ../JitNodeWrap.java +// Parsed from torch/data/datasets/map.h -// Targeting ../ValueWrap.java +// #pragma once +// #include +// #include -// Targeting ../Value.java +// #include +// #include +// #include +// #include -// Targeting ../JitNode.java +// Targeting ../ChunkMapDataset.java -// Targeting ../Block.java +// Targeting ../MNISTMapDataset.java -// Targeting ../Graph.java +/** Creates a {@code MapDataset} with the given dataset and transform. */ -// Targeting ../WithInsertPoint.java + // namespace datasets + // namespace data + // namespace torch -// Targeting ../WithCurrentScope.java +// Parsed from torch/data/datasets/mnist.h +// #pragma once +// #include +// #include +// #include -// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +// #include +// #include +// #include +// Targeting ../MNIST.java + // namespace datasets + // namespace data + // namespace torch +// Parsed from torch/data/datasets/shared.h +// #pragma once +// #include -/************* All nodes not required to be defined before Graph **************/ -// Targeting ../ProfileIValueOp.java +// #include +// #include +// Targeting ../ChunkSharedBatchDataset.java -// Targeting ../PythonOp.java +/** Constructs a new {@code SharedBatchDataset} by creating a + * {@code shared_ptr}. All arguments are forwarded to + * {@code make_shared}. 
*/ + // namespace datasets + // namespace data + // namespace torch -@Namespace("torch::jit") public static native void LintGraph(@Const @SharedPtr @ByRef Graph graph); +// Parsed from torch/data/datasets/tensor.h -@Namespace("torch::jit") public static native @ByVal ValueArrayRef createTupleUnpack(Value v); +// #pragma once -/** Insert graph \p CALLEE into graph \p G using \p INPUTS as input values. - * The insertion happens at the current insertion point. - * Optionally, one can also pass \p VALUE_MAP to get a map between \p CALLEE - * values and their cloned copies in \p G. - */ -@Namespace("torch::jit") public static native @ByVal ValueVector insertGraph( - @ByRef Graph g, - @ByRef Graph callee, - @ByVal ValueArrayRef inputs); -@Namespace("torch::jit") public static native @ByVal ValueVector insertGraph( - @ByRef Graph g, - @ByRef Graph callee, - @ByVal ValueArrayRef inputs, - @ByRef ValueValueMap value_map); +// #include +// #include +// #include -/** Insert function \p CALLEE after node \p TO_REPLACE, remove the node and - * replace all its uses with corresponding outputs of the inserted function. - * This asserts that the number of outputs of the original node and the - * graph are the same. - */ -@Namespace("torch::jit") public static native @ByVal ValueVector inlineCallTo( - JitNode to_replace, - GraphFunction callee, - @Cast("bool") boolean use_graph/*=true*/); -@Namespace("torch::jit") public static native @ByVal ValueVector inlineCallTo( - JitNode to_replace, - GraphFunction callee); +// #include +// #include -@Namespace("torch::jit") public static native @ByVal ValueVector inlineCallTo( - JitNode to_replace, - GraphFunction callee, - Graph callee_graph); +/** A dataset of tensors. + * Stores a single tensor internally, which is then indexed inside {@code get()}. */ -/** If there is only one value in \p OUTPUTS and its kind is Tuple, insert a - * tuple unpack node and return the resulting values. - */ -@Namespace("torch::jit") public static native @ByVal ValueVector unpackOutputs(@Const @ByRef ValueVector outputs); + // namespace datasets + // namespace data + // namespace torch -@Namespace("torch::jit") public static native @Cast("torch::jit::Node**") @StdVector PointerPointer findAllNodes(@ByRef Graph g, @ByVal Symbol kind, @Cast("bool") boolean recurse); -@Namespace("torch::jit") public static native @Cast("torch::jit::Node**") @StdVector PointerPointer findAllNodes(@ByRef Block b, @ByVal Symbol kind, @Cast("bool") boolean recurse); -@Namespace("torch::jit") public static native @Cast("torch::jit::Node**") @StdVector PointerPointer findAllNodes( - @ByVal BlockArrayRef a, - @ByVal Symbol kind, - @Cast("bool") boolean recurse); -// Targeting ../OperatorSet.java +// Parsed from torch/data/datasets.h +// #pragma once - // namespace jit - // namespace torch +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Parsed from torch/csrc/jit/ir/type_hashing.h +// Parsed from torch/data/transforms/base.h // #pragma once -// #include -// #include -// Targeting ../HashType.java - +// #include -// Targeting ../EqualType.java +// #include +// #include +// Targeting ../ExampleCollation.java - // namespace jit +/** A transformation of individual input examples to individual output examples. + * + * Just like a {@code Dataset} is a {@code BatchDataset}, a {@code Transform} is a + * {@code BatchTransform} that can operate on the level of individual examples rather + * than entire batches. 
The batch-level transform is implemented (by default) + * in terms of the example-level transform, though this can be customized. */ + // namespace transforms + // namespace data // namespace torch -// Parsed from torch/csrc/jit/passes/shape_analysis.h +// Parsed from torch/data/transforms/lambda.h // #pragma once -// #include -// #include -// #include -// Targeting ../propagation_error.java +// #include + +// #include +// #include +// #include +/** A {@code BatchTransform} that applies a user-provided functor to a batch. */ -// Targeting ../PropertyPropBase.java +// A `Transform` that applies a user-provided functor to individual examples. + // namespace transforms + // namespace data + // namespace torch -@Namespace("torch::jit") public static native void EraseShapeInformation(@Const @SharedPtr @ByRef Graph graph); -@Namespace("torch::jit") public static native void PropagateInputShapes(@Const @SharedPtr @ByRef Graph graph); +// Parsed from torch/data/transforms/collate.h -@Namespace("torch::jit") public static native @Cast("bool") boolean mergeTypes( - @ByVal ValueArrayRef lhs, - @ByVal ValueArrayRef rhs, - @ByVal ValueArrayRef outputs); +// #pragma once - // namespace jit - // namespace torch +// #include +// #include +// #include -// Parsed from torch/csrc/jit/python/update_graph_executor_opt.h +/** A {@code Collation} is a transform that reduces a batch into a single value. + * The result is a {@code BatchDataset} that has the type of the single value as its + * {@code BatchType}. */ -// #pragma once -// #include -@Namespace("torch::jit") public static native void setGraphExecutorOptimize(@Cast("bool") boolean o); -@Namespace("torch::jit") public static native @Cast("bool") boolean getGraphExecutorOptimize(); - // namespace jit +/// +/// + +/** A {@code Collate} allows passing a custom function to reduce/collate a batch + * into a single value. It's effectively the lambda version of {@code Collation}, + * which you could subclass and override {@code operator()} to achieve the same. + * + * \rst + * .. code-block:: cpp + * using namespace torch::data; + * + * auto dataset = datasets::MNIST("path/to/mnist") + * .map(transforms::Collate>([](std::vector> e) { + * return std::move(e.front()); + * })); + * \endrst */ + // namespace transforms + // namespace data // namespace torch -// Parsed from torch/csrc/jit/runtime/argument_spec.h +// Parsed from torch/data/transforms/stack.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") -// #endif -// Targeting ../ArgumentInfo.java +// #include +// #include +// #include +// #include +// #include +// Targeting ../ExampleStack.java -// Targeting ../ArgumentSpec.java -@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long ARG_SPEC_DEPTH_LIMIT(); -public static final long ARG_SPEC_DEPTH_LIMIT = ARG_SPEC_DEPTH_LIMIT(); +/** A {@code Collation} for {@code Example} types that stacks all data + * tensors into one tensor. */ + // namespace transforms + // namespace data + // namespace torch -// Targeting ../ArgumentSpecCreator.java +// Parsed from torch/data/transforms/tensor.h -// Targeting ../CompleteArgumentInfoPOD.java +// #pragma once +// #include +// #include +// #include -// Targeting ../CompleteArgumentSpec.java +// #include +// #include +/** A {@code Transform} that is specialized for the typical {@code Example} + * combination. 
It exposes a single {@code operator()} interface hook (for + * subclasses), and calls this function on input {@code Example} objects. */ -// Targeting ../CompleteArgumentInfo.java +/** A {@code Lambda} specialized for the typical {@code Example} input type. */ +/** Normalizes input tensors by subtracting the supplied mean and dividing by + * the given standard deviation. */ + // namespace transforms + // namespace data + // namespace torch -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef ArgumentInfo info); +// Parsed from torch/data/transforms.h -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef ArgumentSpec spec); +// #pragma once -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( - @Cast("std::ostream*") @ByRef Pointer out, - @Const @ByRef CompleteArgumentInfo info); +// #include +// #include +// #include +// #include +// #include -@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft( - @Cast("std::ostream*") @ByRef Pointer out, - @Const @ByRef CompleteArgumentSpec spec); +// Parsed from torch/data.h +// #pragma once -@Namespace("torch::jit") public static native @ByVal ByteOptional convertOptional( - @Const @ByRef ScalarTypeOptional from); +// #include +// #include +// #include +// #include - // namespace jit +// Some "exports". + // namespace data // namespace torch - // namespace std - -// Parsed from torch/csrc/jit/runtime/instruction.h +// Parsed from torch/enum.h // #pragma once -// #include -// #include -// #include -// instruction look like: -// op_code X, N -// meaning of X, N depend on the op: -// O - index into operator table -// R - index into register table -// I - literal integer -// C - index into constant table -// P - jump offset relative to beginning of current instruction -// F - index into function table -// T - index into the type table, used for guard instructions -// S - index into object slots -// C - index into code table - -// #define FORALL_OPCODES(_) -// _(OP, "O") /* invoke operator X */ -// _(OPN, "OI") /* invoke vararg operator X with N arguments */ -// _(LOAD, "R") /* push a value from a register X */ -// _(MOVE, "R") /* push a value from register X, clearing the register */ -// _(STOREN, "RI") /* store N values to registers [X, X+N) */ -// _(STORE, "R") /* store 1 value to registers X */ -// _(DROP, "") /* drop 1 value from the top of the stack */ -// _(DROPR, "R") /* clear register X */ -// _(LOADC, "C") /* push the constant X */ -// _(JF, "P") /* pop the top of the stack, if false, branch to P */ -// _(JMP, "P") /* unconditional branch to X */ -// _(LOOP, "PI") /* perform a loop, X is where to branch if cond is false */ -// _(RET, "") /* exit execution */ -// _(WAIT, "") /* wait for a future to be complete */ -// _(CALL, "F") /* call function X */ -// _(GUARD, "T") /* check a guard against type_table, true if passes */ -// _(TYPECHECK, "TN") /* check each type of input[i] against type_table[X+N] */ -// _(FAIL_GUARD, "T") /* fail a guard, patch back to GUARD */ -// _(PROFILE_OP, "F") /* get a callback from profile_function_table at X */ -// _(TAIL_CALL, "F") /* replace current frame with function F */ -// _(INTERFACE_CALL, "CI") /* call method X on the first argument (of N) */ -// 
_(GET_ATTR, "S") /* get attribute from slot X in an Object */ -// _(SET_ATTR, "S") /* set attribute to slot X in an Object */ -// _(LIST_UNPACK, "I") /* unpack list expecting length I */ -// _(TUPLE_CONSTRUCT, "I") /* construct a tuple using X inputs */ -// _(NAMED_TUPLE_CONSTRUCT, -// "TI") /* construct a tuple of type X, using N inputs */ -// _(LIST_CONSTRUCT, "TI") /* construct a list of type X, using N inputs */ -// _(DICT_CONSTRUCT, "TI") /* construct a dict of type X, using N inputs */ -// _(CREATE_OBJECT, "T") /* create an object of type X */ -// _(ISINSTANCE, "TI") /* check object is one of types[X:X+N] */ -// _(TUPLE_SLICE, "II") /* slice tup[X:(X+N)] */ -// _(TUPLE_INDEX, "") /* get the value from a tuple at that index */ -// _(RAISE_EXCEPTION, "") /* throws the exception from Python */ -// _(DICT_INDEX, "") /* gets the value from the dict for given key */ -// _(UNCHECKED_CAST, "") /* perform an unchecked cast operation */ -// _(__IS__, "") /* performs `is` operator from Python */ -// _(UN_INITIALIZED, -// "") /* sets default values to varaibles that are un initialized */ -// _(__ISNOT__, "") /* performs `is not` operator from Python */ -// _(FORMAT, "I") /* performs string format function `f strings` or `{}.format` \ -// the number of inputs in stored in X */ -// _(DEVICE, "") /* invokes aten::device for a Tensor */ -// _(DTYPE, "") /* invokes aten::dtype for a Tensor */ -// _(DIM, "") /* invokes aten::dim for a Tensor */ -// _(__NOT__, "") /* performs `not` operator from Python */ -// _(TO_LIST, "") /* convert the input to a list */ -// _(NUM_TO_TENSOR, -// "") /* performs the conversion of a number/scalar to Tensor */ -// _(IS_CUDA, "") /* invokes aten::is_cuda for a Tensor */ -// _(FORK, "CN") /* launch a thread to run code entry x with N inputs */ -// _(WARN, "I") /* emit a warning with line information */ -// _(ENTER, "EN") /* enter scope of a contextmanager */ -// _(EXIT, "EX") /* exit the last entered contextmanager */ -// _(AWAITABLE, "CN") /* initialize await for code entry x with N inputs */ - -@Namespace("torch::jit") public enum OpCode { - OP((byte)(0)), /* invoke operator X */ - OPN((byte)(1)), /* invoke vararg operator X with N arguments */ - LOAD((byte)(2)), /* push a value from a register X */ - MOVE((byte)(3)), /* push a value from register X, clearing the register */ - STOREN((byte)(4)), /* store N values to registers [X, X+N) */ - STORE((byte)(5)), /* store 1 value to registers X */ - DROP((byte)(6)), /* drop 1 value from the top of the stack */ - DROPR((byte)(7)), /* clear register X */ - LOADC((byte)(8)), /* push the constant X */ - JF((byte)(9)), /* pop the top of the stack, if false, branch to P */ - JMP((byte)(10)), /* unconditional branch to X */ - LOOP((byte)(11)), /* perform a loop, X is where to branch if cond is false */ - RET((byte)(12)), /* exit execution */ - WAIT((byte)(13)), /* wait for a future to be complete */ - CALL((byte)(14)), /* call function X */ - GUARD((byte)(15)), /* check a guard against type_table, true if passes */ - TYPECHECK((byte)(16)), /* check each type of input[i] against type_table[X+N] */ - FAIL_GUARD((byte)(17)), /* fail a guard, patch back to GUARD */ - PROFILE_OP((byte)(18)), /* get a callback from profile_function_table at X */ - TAIL_CALL((byte)(19)), /* replace current frame with function F */ - INTERFACE_CALL((byte)(20)), /* call method X on the first argument (of N) */ - GET_ATTR((byte)(21)), /* get attribute from slot X in an Object */ - SET_ATTR((byte)(22)), /* set attribute to slot X in an Object */ - 
LIST_UNPACK((byte)(23)), /* unpack list expecting length I */ - TUPLE_CONSTRUCT((byte)(24)), /* construct a tuple using X inputs */ - NAMED_TUPLE_CONSTRUCT((byte)(25)), /* construct a tuple of type X, using N inputs */ - LIST_CONSTRUCT((byte)(26)), /* construct a list of type X, using N inputs */ - DICT_CONSTRUCT((byte)(27)), /* construct a dict of type X, using N inputs */ - CREATE_OBJECT((byte)(28)), /* create an object of type X */ - ISINSTANCE((byte)(29)), /* check object is one of types[X:X+N] */ - TUPLE_SLICE((byte)(30)), /* slice tup[X:(X+N)] */ - TUPLE_INDEX((byte)(31)), /* get the value from a tuple at that index */ - RAISE_EXCEPTION((byte)(32)), /* throws the exception from Python */ - DICT_INDEX((byte)(33)), /* gets the value from the dict for given key */ - UNCHECKED_CAST((byte)(34)), /* perform an unchecked cast operation */ - __IS__((byte)(35)), /* performs `is` operator from Python */ - UN_INITIALIZED((byte)(36)), /* sets default values to varaibles that are un initialized */ - __ISNOT__((byte)(37)), /* performs `is not` operator from Python */ - FORMAT((byte)(38)), /* performs string format function `f strings` or `{}.format` \ - the number of inputs in stored in X */ - DEVICE((byte)(39)), /* invokes aten::device for a Tensor */ - DTYPE((byte)(40)), /* invokes aten::dtype for a Tensor */ - DIM((byte)(41)), /* invokes aten::dim for a Tensor */ - __NOT__((byte)(42)), /* performs `not` operator from Python */ - TO_LIST((byte)(43)), /* convert the input to a list */ - NUM_TO_TENSOR((byte)(44)), /* performs the conversion of a number/scalar to Tensor */ - IS_CUDA((byte)(45)), /* invokes aten::is_cuda for a Tensor */ - FORK((byte)(46)), /* launch a thread to run code entry x with N inputs */ - WARN((byte)(47)), /* emit a warning with line information */ - ENTER((byte)(48)), /* enter scope of a contextmanager */ - EXIT((byte)(49)), /* exit the last entered contextmanager */ - AWAITABLE((byte)(50)); +// #include - public final byte value; - private OpCode(byte v) { this.value = v; } - private OpCode(OpCode e) { this.value = e.value; } - public OpCode intern() { for (OpCode e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -// Targeting ../Instruction.java +// #include +// #include +// #include +// #include +// #define TORCH_ENUM_DECLARE(name) +// namespace torch { +// namespace enumtype { +// /* \ +// NOTE: We need to provide the default constructor for each struct, \ +// otherwise Clang 3.8 would complain: \ +// ``` \ +// error: default initialization of an object of const type 'const \ +// enumtype::Enum1' without a user-provided default constructor \ +// ``` \ +// */ +// struct k##name { +// k##name() {} +// }; +// } +// TORCH_API extern const enumtype::k##name k##name; +// } +// #define TORCH_ENUM_DEFINE(name) +// namespace torch { +// const enumtype::k##name k##name; +// } +// #define TORCH_ENUM_PRETTY_PRINT(name) +// std::string operator()(const enumtype::k##name& v) const { +// std::string k("k"); +// return k + #name; +// } +// NOTE: Backstory on why we need the following two macros: +// +// Consider the following options class: +// +// ``` +// struct TORCH_API SomeOptions { +// typedef c10::variant +// reduction_t; SomeOptions(reduction_t reduction = torch::kMean) : +// reduction_(reduction) {} +// +// TORCH_ARG(reduction_t, reduction); +// }; +// ``` +// +// and the functional that uses it: +// +// ``` +// Tensor some_functional( +// const Tensor& input, +// SomeOptions options = {}) { +// ... 
+// } +// ``` +// +// Normally, we would expect this to work: +// +// `F::some_functional(input, torch::kNone)` +// +// However, it throws the following error instead: +// +// ``` +// error: could not convert `torch::kNone` from `const torch::enumtype::kNone` +// to `torch::nn::SomeOptions` +// ``` +// +// To get around this problem, we explicitly provide the following constructors +// for `SomeOptions`: +// +// ``` +// SomeOptions(torch::enumtype::kNone reduction) : reduction_(torch::kNone) {} +// SomeOptions(torch::enumtype::kMean reduction) : reduction_(torch::kMean) {} +// SomeOptions(torch::enumtype::kSum reduction) : reduction_(torch::kSum) {} +// ``` +// +// so that the conversion from `torch::kNone` to `SomeOptions` would work. +// +// Note that we also provide the default constructor `SomeOptions() {}`, so that +// `SomeOptions options = {}` can work. +// #define TORCH_OPTIONS_CTOR_VARIANT_ARG3( +// OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3) +// OPTIONS_NAME() = default; +// OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} +// OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} +// OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {} +// #define TORCH_OPTIONS_CTOR_VARIANT_ARG4( +// OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3, TYPE4) +// OPTIONS_NAME() = default; +// OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} +// OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} +// OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {} +// OPTIONS_NAME(torch::enumtype::TYPE4 ARG_NAME) : ARG_NAME##_(torch::TYPE4) {} +// Targeting ../kLinear.java + +// Targeting ../kConv1D.java + +// Targeting ../kConv2D.java - // namespace jit - // namespace torch + +// Targeting ../kConv3D.java + +// Targeting ../kConvTranspose1D.java -// Parsed from torch/csrc/jit/runtime/interpreter.h + +// Targeting ../kConvTranspose2D.java -// #pragma once -// #include -// #include -// #include + +// Targeting ../kConvTranspose3D.java -// #include -// #include -// #include -// #include -// #include + +// Targeting ../kSigmoid.java -// #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy-dtor") -// #endif + +// Targeting ../kTanh.java + +// Targeting ../kReLU.java + +// Targeting ../kGELU.java - // namespace at - // namespace c10 -// Targeting ../CodeImpl.java + +// Targeting ../kSiLU.java + +// Targeting ../kMish.java + +// Targeting ../kLeakyReLU.java -// Targeting ../InterpreterStateImpl.java + +// Targeting ../kFanIn.java + +// Targeting ../kFanOut.java -// Targeting ../Code.java + +// Targeting ../kConstant.java + +// Targeting ../kReflect.java -// Targeting ../MobileCode.java + +// Targeting ../kReplicate.java + +// Targeting ../kCircular.java -// Targeting ../InterpreterState.java + +// Targeting ../kNearest.java + +// Targeting ../kBilinear.java -// Targeting ../Suspend.java + +// Targeting ../kBicubic.java + +// Targeting ../kTrilinear.java -// Targeting ../InterpreterContinuation.java + +// Targeting ../kArea.java + +// Targeting ../kNearestExact.java + +// Targeting ../kSum.java -// what is the tensors type, including state from the current execution context -// that modifies how the tensor behaves. For instance if no_grad is enabled -// this will cause the TensorType to have requires_grad=False. 
-@Namespace("torch::jit") public static native @SharedPtr @ByVal TensorType tensorTypeInCurrentExecutionContext( - @Const @ByRef Tensor t); + +// Targeting ../kMean.java -// current (TLS) TorchScript interpreter callstack -@Namespace("torch::jit") public static native @ByVal StackEntryVector currentCallstack(); -@Namespace("torch::jit") public static native @ByVal StringVector currentModuleHierarchy(); + +// Targeting ../kMax.java - // namespace jit - // namespace torch + +// Targeting ../kNone.java + +// Targeting ../kBatchMean.java + +// Targeting ../kZeros.java -// Parsed from torch/csrc/jit/runtime/graph_executor.h + +// Targeting ../kBorder.java -// #pragma once + +// Targeting ../kReflection.java -// #include -// #include + +// Targeting ../kRNN_TANH.java -// #include -// #include -// #include -// #include -// #include + +// Targeting ../kRNN_RELU.java + +// Targeting ../kLSTM.java + +// Targeting ../kGRU.java -@Namespace("torch::jit") public enum ExecutorExecutionMode { - SIMPLE(0), - PROFILING(1); + +// Targeting ../kValid.java - public final int value; - private ExecutorExecutionMode(int v) { this.value = v; } - private ExecutorExecutionMode(ExecutorExecutionMode e) { this.value = e.value; } - public ExecutorExecutionMode intern() { for (ExecutorExecutionMode e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -// Targeting ../ExecutionPlan.java + +// Targeting ../kSame.java + -// Targeting ../GraphExecutorState.java + // namespace enumtype + // namespace torch -// Targeting ../EnableProfilingGuard.java +// Parsed from torch/fft.h +// #pragma once -// Targeting ../GraphExecutorImplBase.java +// #include +/** Computes the 1 dimensional fast Fourier transform over a given dimension. + * See https://pytorch.org/docs/master/fft.html#torch.fft.fft. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kComplexDouble);
+ *  torch::fft::fft(t);
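+ *  // e.g. zero-pad to length 256 and request "forward" normalization
+ *  // via the optional n/dim/norm arguments:
+ *  torch::fft::fft(t, /*n=* /256, /*dim=* /-1, /*norm=* /"forward");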
+ *  }
*/ -// Targeting ../GraphExecutor.java +/// +@Namespace("torch::fft") public static native @ByVal Tensor fft( + @Const @ByRef Tensor self, + @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @Cast("int64_t") long dim/*=-1*/, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor fft( + @Const @ByRef Tensor self); +/** Computes the 1 dimensional inverse Fourier transform over a given dimension. + * See https://pytorch.org/docs/master/fft.html#torch.fft.ifft. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kComplexDouble);
+ *  torch::fft::ifft(t);
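+ *  // composing with fft recovers the input up to rounding error:
+ *  auto roundtrip = torch::fft::ifft(torch::fft::fft(t));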
+ *  }
*/ +/// +@Namespace("torch::fft") public static native @ByVal Tensor ifft( + @Const @ByRef Tensor self, + @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @Cast("int64_t") long dim/*=-1*/, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor ifft( + @Const @ByRef Tensor self); -@Namespace("torch::jit") public static native JitNode replaceBlockWithFallbackGraph( - Block b, - @ByVal ValueArrayRef inputs); +/** Computes the 2-dimensional fast Fourier transform over the given dimensions. + * See https://pytorch.org/docs/master/fft.html#torch.fft.fft2. + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ *  torch::fft::fft2(t);
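+ *  // equivalent call with the transformed dimensions spelled out:
+ *  torch::fft::fft2(t, /*s=* /c10::nullopt, /*dim=* /{-2, -1});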
+ *  }
*/ -// These passes need to run before it is valid to pass to the interpreter -// regardless of whether sizes have been specialized or not. -@Namespace("torch::jit") public static native void runRequiredPasses(@Const @SharedPtr @ByRef Graph g); +/// +@Namespace("torch::fft") public static native @ByVal Tensor fft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "c10::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor fft2( + @Const @ByRef Tensor self); +@Namespace("torch::fft") public static native @ByVal Tensor fft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "c10::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("torch::jit") public static native void debugSetFusionGroupInlining(@Cast("bool") boolean state); -@Namespace("torch::jit") public static native @Cast("bool") boolean getFusionGroupInlining(); +/** Computes the inverse of torch.fft.fft2 + * See https://pytorch.org/docs/master/fft.html#torch.fft.ifft2. + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ *  torch::fft::ifft2(t);
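+ *  // ifft2 undoes fft2 up to rounding error:
+ *  auto roundtrip = torch::fft::ifft2(torch::fft::fft2(t));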
+ *  }
*/ -@Namespace("torch::jit") public static native void debugSetAutodiffSubgraphInlining(@Cast("bool") boolean state); -@Namespace("torch::jit") public static native @SharedPtr @ByVal Graph lastExecutedOptimizedGraph(); -@Namespace("torch::jit") public static native @Cast("size_t") long getBailoutDepth(); -@Namespace("torch::jit") public static native @Cast("bool") boolean IsNewExecutorEnabled(); -// Targeting ../GraphOptimizerEnabledGuard.java +/// +@Namespace("torch::fft") public static native @ByVal Tensor ifft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor ifft2( + @Const @ByRef Tensor self); +@Namespace("torch::fft") public static native @ByVal Tensor ifft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +/** Computes the N dimensional fast Fourier transform over given dimensions. + * See https://pytorch.org/docs/master/fft.html#torch.fft.fftn. + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ *  torch::fft::fftn(t);
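+ *  // the output keeps the input shape:
+ *  assert(torch::fft::fftn(t).sizes() == t.sizes());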
+ *  }
*/

+///
+@Namespace("torch::fft") public static native @ByVal Tensor fftn(
+    @Const @ByRef Tensor self,
+    @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s,
+    @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim,
+    @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);
+@Namespace("torch::fft") public static native @ByVal Tensor fftn(
+    @Const @ByRef Tensor self);
+@Namespace("torch::fft") public static native @ByVal Tensor fftn(
+    @Const @ByRef Tensor self,
+    @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s,
+    @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim,
+    @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);

+/** Computes the N dimensional inverse fast Fourier transform over given dimensions.
+ * See https://pytorch.org/docs/master/fft.html#torch.fft.ifftn.
+ *
+ * Example:
+ *
{@code
+ *  auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ *  torch::fft::ifftn(t);
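+ *  // ifftn undoes fftn up to rounding error:
+ *  auto roundtrip = torch::fft::ifftn(torch::fft::fftn(t));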
+ *  }
*/ +/// +@Namespace("torch::fft") public static native @ByVal Tensor ifftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor ifftn( + @Const @ByRef Tensor self); +@Namespace("torch::fft") public static native @ByVal Tensor ifftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +/** Computes the 1 dimensional FFT of real input with onesided Hermitian output. + * See https://pytorch.org/docs/master/fft.html#torch.fft.rfft. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128);
+ *  auto T = torch::fft::rfft(t);
+ *  assert(T.is_complex() && T.numel() == 128 / 2 + 1);
+ *  }
*/ +/// +/// +@Namespace("torch::fft") public static native @ByVal Tensor rfft( + @Const @ByRef Tensor self, + @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @Cast("int64_t") long dim/*=-1*/, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor rfft( + @Const @ByRef Tensor self); -// for debugging information we expose a way to get the last actually -// run graph. Previous approaches allowed querying the GraphExecutor -// for what graph it would run in certain circumstances (graphFor), but -// this is fragile because we sometimes change how these decisions are made. -// This interface still allows our tests to look at optimized graphs, but -// with less plumbing. - // namespace detail +/** Computes the inverse of torch.fft.rfft + * + * The input is a onesided Hermitian Fourier domain signal, with real-valued + * output. See https://pytorch.org/docs/master/fft.html#torch.fft.irfft + * + * Example: + *
{@code
+ *  auto T = torch::randn(128 / 2 + 1, torch::kComplexDouble);
+ *  auto t = torch::fft::irfft(T, /*n=* /128);
+ *  assert(t.is_floating_point() && t.numel() == 128);
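+ *  // n determines the output length; by default it would be 2 * (T.size(-1) - 1)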
+ *  }
*/ - // namespace jit - // namespace torch +/// +@Namespace("torch::fft") public static native @ByVal Tensor irfft( + @Const @ByRef Tensor self, + @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @Cast("int64_t") long dim/*=-1*/, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor irfft( + @Const @ByRef Tensor self); +/** Computes the 2-dimensional FFT of real input. Returns a onesided Hermitian + * output. See https://pytorch.org/docs/master/fft.html#torch.fft.rfft2 + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 128}, torch::kDouble);
+ *  torch::fft::rfft2(t);
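+ *  // only the last transformed dimension is halved:
+ *  assert(torch::fft::rfft2(t).size(1) == 128 / 2 + 1);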
+ *  }
*/ -// Parsed from torch/csrc/jit/runtime/operator_options.h +/// +@Namespace("torch::fft") public static native @ByVal Tensor rfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor rfft2( + @Const @ByRef Tensor self); +@Namespace("torch::fft") public static native @ByVal Tensor rfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// #pragma once +/** Computes the inverse of torch.fft.rfft2. + * See https://pytorch.org/docs/master/fft.html#torch.fft.irfft2. + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ *  torch::fft::irfft2(t);
+ *  }
*/ -// #include +/// +@Namespace("torch::fft") public static native @ByVal Tensor irfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor irfft2( + @Const @ByRef Tensor self); +@Namespace("torch::fft") public static native @ByVal Tensor irfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); - // namespace jit - // namespace torch +/** Computes the N dimensional FFT of real input with onesided Hermitian output. + * See https://pytorch.org/docs/master/fft.html#torch.fft.rfftn + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 128}, torch::kDouble);
+ *  torch::fft::rfftn(t);
+ *  }
*/ +/// +@Namespace("torch::fft") public static native @ByVal Tensor rfftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor rfftn( + @Const @ByRef Tensor self); +@Namespace("torch::fft") public static native @ByVal Tensor rfftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// Parsed from torch/csrc/jit/runtime/operator.h +/** Computes the inverse of torch.fft.rfftn. + * See https://pytorch.org/docs/master/fft.html#torch.fft.irfftn. + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ *  torch::fft::irfftn(t);
+ *  }
*/

+///
+///
+@Namespace("torch::fft") public static native @ByVal Tensor irfftn(
+    @Const @ByRef Tensor self,
+    @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s,
+    @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim,
+    @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);
+@Namespace("torch::fft") public static native @ByVal Tensor irfftn(
+    @Const @ByRef Tensor self);
+@Namespace("torch::fft") public static native @ByVal Tensor irfftn(
+    @Const @ByRef Tensor self,
+    @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s,
+    @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim,
+    @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm);

+/** Computes the 1 dimensional FFT of a onesided Hermitian signal.
+ *
+ * The input represents a Hermitian symmetric time domain signal. The returned
+ * Fourier domain representation of such a signal is real-valued. See
+ * https://pytorch.org/docs/master/fft.html#torch.fft.hfft.
+ *
+ * Example:
+ *
{@code
+ *  auto t = torch::randn(128 / 2 + 1, torch::kComplexDouble);
+ *  auto T = torch::fft::hfft(t, /*n=* /128);
+ *  assert(T.is_floating_point() && T.numel() == 128);
+ *  }
*/ -// #include -// #include +/// +/// +@Namespace("torch::fft") public static native @ByVal Tensor hfft( + @Const @ByRef Tensor self, + @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @Cast("int64_t") long dim/*=-1*/, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor hfft( + @Const @ByRef Tensor self); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../OperationCreator.java +/** Computes the inverse FFT of a real-valued Fourier domain signal. + * + * The output is a onesided representation of the Hermitian symmetric time + * domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.ihfft. + * + * Example: + *
{@code
+ *  auto T = torch::randn(128, torch::kDouble);
+ *  auto t = torch::fft::ihfft(T);
+ *  assert(t.is_complex() && t.numel() == 128 / 2 + 1);
+ *  }
*/ +/// +/// +@Namespace("torch::fft") public static native @ByVal Tensor ihfft( + @Const @ByRef Tensor self, + @ByVal(nullValue = "c10::optional(c10::nullopt)") LongOptional n, + @Cast("int64_t") long dim/*=-1*/, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor ihfft( + @Const @ByRef Tensor self); -// Targeting ../Operator.java +/** Computes the 2-dimensional FFT of a Hermitian symmetric input signal. + * + * The input is a onesided representation of the Hermitian symmetric time + * domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.hfft2. + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 65}, torch::kComplexDouble);
+ *  auto T = torch::fft::hfft2(t, /*s=* /{128, 128});
+ *  assert(T.is_floating_point() && T.numel() == 128 * 128);
+ *  }
*/ +/// +/// +@Namespace("torch::fft") public static native @ByVal Tensor hfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor hfft2( + @Const @ByRef Tensor self); +@Namespace("torch::fft") public static native @ByVal Tensor hfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +/** Computes the 2-dimensional IFFT of a real input signal. + * + * The output is a onesided representation of the Hermitian symmetric time + * domain signal. See + * https://pytorch.org/docs/master/fft.html#torch.fft.ihfft2. + * + * Example: + *
{@code
+ *  auto T = torch::randn({128, 128}, torch::kDouble);
+ *  auto t = torch::fft::ihfft2(T);
+ *  assert(t.is_complex() && t.size(1) == 65);
+ *  }
*/ -@Namespace("torch::jit") public static native @StdString BytePointer canonicalSchemaString(@Const @ByRef FunctionSchema schema); +/// +/// +@Namespace("torch::fft") public static native @ByVal Tensor ihfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor ihfft2( + @Const @ByRef Tensor self); +@Namespace("torch::fft") public static native @ByVal Tensor ihfft2( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("torch::jit") public static native @Const @ByVal OperatorVector getAllOperators(); -@Namespace("torch::jit") public static native @Const @ByRef OperatorVector getAllOperatorsFor( - @ByVal Symbol name); +/** Computes the N-dimensional FFT of a Hermitian symmetric input signal. + * + * The input is a onesided representation of the Hermitian symmetric time + * domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.hfftn. + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 65}, torch::kComplexDouble);
+ *  auto T = torch::fft::hfftn(t, /*s=* /{128, 128});
+ *  assert(T.is_floating_point() && T.numel() == 128 * 128);
+ *  }
*/ -// given a operator with an overload name, find the specific operator related to -// it, may return nullptr if no operator exists. -@Namespace("torch::jit") public static native @SharedPtr @ByVal Operator findOperatorFor( - @Const @ByRef OperatorName full_name); +/// +/// +@Namespace("torch::fft") public static native @ByVal Tensor hfftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor hfftn( + @Const @ByRef Tensor self); +@Namespace("torch::fft") public static native @ByVal Tensor hfftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -@Namespace("torch::jit") public static native @ByVal SymbolVector findSimilarOperators(@ByVal Symbol input_op); +/** Computes the N-dimensional IFFT of a real input signal. + * + * The output is a onesided representation of the Hermitian symmetric time + * domain signal. See + * https://pytorch.org/docs/master/fft.html#torch.fft.ihfftn. + * + * Example: + *
+ *  <pre>{@code
+ *  auto T = torch::randn({128, 128}, torch::kDouble);
+ *  auto t = torch::fft::ihfftn(T);
+ *  assert(t.is_complex() && t.size(1) == 65);
+ *  }</pre>
*/ -@Namespace("torch::jit") public static native void registerOperator(@ByRef(true) Operator op); -@Namespace("torch::jit") public static native void deregisterOperator(@Const @ByRef FunctionSchema schema); +/// +/// +@Namespace("torch::fft") public static native @ByVal Tensor ihfftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); +@Namespace("torch::fft") public static native @ByVal Tensor ihfftn( + @Const @ByRef Tensor self); +@Namespace("torch::fft") public static native @ByVal Tensor ihfftn( + @Const @ByRef Tensor self, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "c10::optional(c10::nullopt)") @Cast("c10::optional*") Pointer norm); -// XXX: this function is meant to be used with string literals only! -@Namespace("torch::jit") public static native @SharedPtr @ByVal Operator getOperatorForLiteral( - @Cast("const char*") BytePointer signature); -@Namespace("torch::jit") public static native @SharedPtr @ByVal Operator getOperatorForLiteral( - String signature); +/** Computes the discrete Fourier Transform sample frequencies for a signal of + * size n. + * + * See https://pytorch.org/docs/master/fft.html#torch.fft.fftfreq + * + * Example: + *
+ *  <pre>{@code
+ *  auto frequencies = torch::fft::fftfreq(128, torch::kDouble);
+ *  }</pre>
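+ *
+ *  The same call from Java, using the overloads declared above (a sketch; a
+ *  static import of org.bytedeco.pytorch.global.torch is assumed):
+ *  <pre>{@code
+ *  Tensor frequencies = fftfreq(128);      // unit sample spacing
+ *  Tensor scaled = fftfreq(128, 0.5);      // sample spacing d = 0.5
+ *  }</pre>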
*/ +@Namespace("torch::fft") public static native @ByVal Tensor fftfreq(@Cast("int64_t") long n, double d, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("torch::fft") public static native @ByVal Tensor fftfreq(@Cast("int64_t") long n, double d); + + +/// +/// +@Namespace("torch::fft") public static native @ByVal Tensor fftfreq(@Cast("int64_t") long n, @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options); +@Namespace("torch::fft") public static native @ByVal Tensor fftfreq(@Cast("int64_t") long n); + +/** Computes the sample frequencies for torch.fft.rfft with a signal of size n. + * + * Like torch.fft.rfft, only the positive frequencies are included. + * See https://pytorch.org/docs/master/fft.html#torch.fft.rfftfreq + * + * Example: + *
+ *  <pre>{@code
+ *  auto frequencies = torch::fft::rfftfreq(128, torch::kDouble);
+ *  }</pre>
*/ +@Namespace("torch::fft") public static native @ByVal Tensor rfftfreq(@Cast("int64_t") long n, double d, @Const @ByRef TensorOptions options); -// Ensure the thing that registers c10 ops is defined. -// Otherwise, our registry will not have c10 ops. You can run into this -// scenario if you're querying registered ops during static init. -// -// This fn is defined in register_c10_ops.cpp -@Namespace("torch::jit") public static native void ensure_c10_registerer_defined(); -// Used to assert that unschematized operators have an analysis method written -@Namespace("torch::jit") public static native @Cast("bool") boolean aliasAnalysisHasSpecialCaseFor(@ByVal Symbol sym); +/// +/// +@Namespace("torch::fft") public static native @ByVal Tensor rfftfreq(@Cast("int64_t") long n, @Const @ByRef TensorOptions options); -// A factory function to generate an optional operator. It has two -// instantiations depending on the template bool arg value. The arg can be a -// compile-time function for the selective op registration based on schema -// string. +/** Reorders n-dimensional FFT output to have negative frequency terms first, by + * a torch.roll operation. + * + * See https://pytorch.org/docs/master/fft.html#torch.fft.fftshift + * + * Example: + *
+ *  <pre>{@code
+ *  auto x = torch::randn({127, 4});
+ *  auto centred_fft = torch::fft::fftshift(torch::fft::fftn(x));
+ *  }</pre>
*/ - // namespace jit +/// +/// +@Namespace("torch::fft") public static native @ByVal Tensor fftshift( + @Const @ByRef Tensor x, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim); +@Namespace("torch::fft") public static native @ByVal Tensor fftshift( + @Const @ByRef Tensor x); +@Namespace("torch::fft") public static native @ByVal Tensor fftshift( + @Const @ByRef Tensor x, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + +/** Inverse of torch.fft.fftshift + * + * See https://pytorch.org/docs/master/fft.html#torch.fft.ifftshift + * + * Example: + *
+ *  <pre>{@code
+ *  auto x = torch::randn({127, 4});
+ *  auto shift = torch::fft::fftshift(x);
+ *  auto unshift = torch::fft::ifftshift(shift);
+ *  assert(torch::allclose(x, unshift));
+ *  }</pre>
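+ *
+ *  The same round trip from Java (a sketch; a static import of
+ *  org.bytedeco.pytorch.global.torch and the generated long... overload of
+ *  randn are assumed):
+ *  <pre>{@code
+ *  Tensor x = randn(127, 4);
+ *  Tensor shifted = fftshift(x);
+ *  Tensor unshifted = ifftshift(shifted);
+ *  // allclose(x, unshifted) should hold
+ *  }</pre>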
*/ +@Namespace("torch::fft") public static native @ByVal Tensor ifftshift( + @Const @ByRef Tensor x, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim); +@Namespace("torch::fft") public static native @ByVal Tensor ifftshift( + @Const @ByRef Tensor x); +@Namespace("torch::fft") public static native @ByVal Tensor ifftshift( + @Const @ByRef Tensor x, + @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + + // namespace fft // namespace torch -// Parsed from torch/csrc/jit/runtime/custom_operator.h +// Parsed from torch/jit.h // #pragma once -// #include -// #include -// #include -// Targeting ../RegisterOperators.java +// #include +// #include +// #include +// #include +/** Compiles script code into an executable graph. + * + * Takes a string containing functions in script syntax and compiles them into + * a module (graph). The returned module provides a {@code run_method} function + * that may be used to invoke the compiled functions. + * + * For example: + * \rst + * .. code-block:: cpp + * + * auto module = torch::jit::compile(R"JIT( + * def relu_script(a, b): + * return torch.relu(a + b) + * def test_while(a, i): + * while i < 10: + * a += a + * i += 1 + * return a + * )JIT"); + * IValue output = module->run_method("relu_script", a, b); + * \endrst */ +@Namespace("torch::jit") public static native @SharedPtr CompilationUnit compile(@StdString BytePointer source); +@Namespace("torch::jit") public static native @SharedPtr CompilationUnit compile(@StdString String source); // namespace jit // namespace torch -// Parsed from torch/csrc/jit/api/compilation_unit.h +// Parsed from torch/linalg.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// #include -// #include -// #include -// #include -// #include +// #ifndef DOXYGEN_SHOULD_SKIP_THIS -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../Self.java +@Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensor_T eig(@Const @ByRef Tensor self); +@Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer eig_out( + @ByRef Tensor eigvals, + @ByRef Tensor eigvecs, + @Const @ByRef Tensor self); -// Targeting ../CompilationUnit.java +@Namespace("torch::linalg::detail") public static native @ByVal Tensor eigvals(@Const @ByRef Tensor self); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor eigvals_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// Targeting ../StrongFunctionPtr.java +@Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensor_T eigh( + @Const @ByRef Tensor self, + @ByVal @Cast("c10::string_view*") Pointer uplo); +@Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer eigh_out( + @ByRef Tensor eigvals, + @ByRef Tensor eigvecs, + @Const @ByRef Tensor self, + @ByVal @Cast("c10::string_view*") Pointer uplo); -// We once had a `script::` namespace that was deleted. This is for backcompat -// of the public API; new code should not use this type alias. 
- // namespace script - // namespace jit - // namespace torch +@Namespace("torch::linalg::detail") public static native @ByVal Tensor eigvalsh(@Const @ByRef Tensor self, @ByVal @Cast("c10::string_view*") Pointer uplo); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor eigvalsh_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @ByVal @Cast("c10::string_view*") Pointer uplo); -// Parsed from torch/csrc/jit/api/function_impl.h +@Namespace("torch::linalg::detail") public static native @ByVal Tensor householder_product(@Const @ByRef Tensor input, @Const @ByRef Tensor tau); -// #pragma once +@Namespace("torch::linalg::detail") public static native @ByRef Tensor householder_product_out( + @ByRef Tensor result, + @Const @ByRef Tensor input, + @Const @ByRef Tensor tau); -// #include -// #include -// #include -// #include -// Targeting ../GraphFunction.java +@Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensor_T lu_factor( + @Const @ByRef Tensor self, + @Cast("const bool") boolean pivot); +@Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer lu_factor_out( + @ByRef Tensor LU, + @ByRef Tensor pivots, + @Const @ByRef Tensor self, + @Cast("const bool") boolean pivot); +@Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensorTensor_T lu( + @Const @ByRef Tensor self, + @Cast("const bool") boolean pivot); -// Short hands for dynamic_cast. -@Namespace("torch::jit") public static native @NoException(true) GraphFunction tryToGraphFunction(@ByRef Function arg0); -@Namespace("torch::jit") public static native @ByRef GraphFunction toGraphFunction(@ByRef Function arg0); +@Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer lu_out( + @ByRef Tensor P, + @ByRef Tensor L, + @ByRef Tensor U, + @Const @ByRef Tensor self, + @Cast("const bool") boolean pivot); - // namespace jit - // namespace torch +@Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensorTensorTensor_T lstsq( + @Const @ByRef Tensor self, + @Const @ByRef Tensor b, + @ByVal DoubleOptional cond, + @ByVal @Cast("c10::optional*") Pointer driver); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor norm( + @Const @ByRef Tensor self, + @Const @ByRef ScalarOptional opt_ord, + @ByVal LongArrayRefOptional opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor norm( + @Const @ByRef Tensor self, + @Const @ByRef ScalarOptional opt_ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); -// Parsed from torch/csrc/jit/api/method.h +@Namespace("torch::linalg::detail") public static native @ByVal Tensor norm( + @Const @ByRef Tensor self, + @ByVal @Cast("c10::string_view*") Pointer ord, + @ByVal LongArrayRefOptional opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor norm( + @Const @ByRef Tensor self, + @ByVal @Cast("c10::string_view*") Pointer ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); -// #pragma once +@Namespace("torch::linalg::detail") public static native @ByRef Tensor norm_out( + @ByRef Tensor result, + @Const @ByRef Tensor 
self, + @Const @ByRef ScalarOptional opt_ord, + @ByVal LongArrayRefOptional opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor norm_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @Const @ByRef ScalarOptional opt_ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); -// #include -// #include -// #include -// #include -// #include -// Targeting ../Method.java +@Namespace("torch::linalg::detail") public static native @ByRef Tensor norm_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @ByVal @Cast("c10::string_view*") Pointer ord, + @ByVal LongArrayRefOptional opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor norm_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @ByVal @Cast("c10::string_view*") Pointer ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor vector_norm( + @Const @ByRef Tensor self, + @ByVal Scalar ord, + @ByVal LongArrayRefOptional opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor vector_norm( + @Const @ByRef Tensor self, + @ByVal Scalar ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); -// We once had a `script::` namespace that was deleted. This is for backcompat -// of the public API; new code should not use this type alias. 
- // namespace script +@Namespace("torch::linalg::detail") public static native @ByRef Tensor vector_norm_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @ByVal Scalar ord, + @ByVal LongArrayRefOptional opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor vector_norm_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @ByVal Scalar ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); - // namespace jit - // namespace torch +@Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_norm( + @Const @ByRef Tensor self, + @Const @ByRef Scalar ord, + @ByVal LongArrayRef dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_norm( + @Const @ByRef Tensor self, + @Const @ByRef Scalar ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_norm_out( + @Const @ByRef Tensor self, + @Const @ByRef Scalar ord, + @ByVal LongArrayRef dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype, + @ByRef Tensor result); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_norm_out( + @Const @ByRef Tensor self, + @Const @ByRef Scalar ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype, + @ByRef Tensor result); -// Parsed from torch/csrc/jit/api/object.h +@Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_norm( + @Const @ByRef Tensor self, + @StdString BytePointer ord, + @ByVal LongArrayRef dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_norm( + @Const @ByRef Tensor self, + @StdString String ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype); -// #pragma once +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_norm_out( + @Const @ByRef Tensor self, + @StdString BytePointer ord, + @ByVal LongArrayRef dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype, + @ByRef Tensor result); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_norm_out( + @Const @ByRef Tensor self, + @StdString String ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional dtype, + @ByRef Tensor result); -// #include -// #include -// #include -// #include +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_power_out(@Const @ByRef Tensor self, @Cast("int64_t") long n, @ByRef Tensor result); -// #include +@Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_rank(@Const @ByRef Tensor input, double tol, @Cast("bool") boolean hermitian); -// Throw this in C++ land if `attr` fails. 
This will be converted to a Python -// AttributeError by the Python binding code -// Targeting ../JitObject.java +@Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_rank( + @Const @ByRef Tensor input, + @Const @ByRef Tensor tol, + @Cast("bool") boolean hermitian); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_rank( + @Const @ByRef Tensor input, + @ByVal DoubleOptional atol, + @ByVal DoubleOptional rtol, + @Cast("bool") boolean hermitian); -// We once had a `script::` namespace that was deleted. This is for backcompat -// of the public API; new code should not use this type alias. - // namespace script - // namespace jit - // namespace torch +@Namespace("torch::linalg::detail") public static native @ByVal Tensor matrix_rank( + @Const @ByRef Tensor input, + @Const @ByRef TensorOptional atol, + @Const @ByRef TensorOptional rtol, + @Cast("bool") boolean hermitian); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_rank_out( + @ByRef Tensor result, + @Const @ByRef Tensor input, + double tol, + @Cast("bool") boolean hermitian); -// Parsed from torch/csrc/jit/api/module.h +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_rank_out( + @ByRef Tensor result, + @Const @ByRef Tensor input, + @Const @ByRef Tensor tol, + @Cast("bool") boolean hermitian); -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_rank_out( + @ByRef Tensor result, + @Const @ByRef Tensor input, + @ByVal DoubleOptional atol, + @ByVal DoubleOptional rtol, + @Cast("bool") boolean hermitian); -// #include -// #include -// #include -// #include +@Namespace("torch::linalg::detail") public static native @ByRef Tensor matrix_rank_out( + @ByRef Tensor result, + @Const @ByRef Tensor input, + @Const @ByRef TensorOptional atol, + @Const @ByRef TensorOptional rtol, + @Cast("bool") boolean hermitian); -// #include -// #include -// #include -// #include -// #include +@Namespace("torch::linalg::detail") public static native @ByVal Tensor multi_dot(@ByVal TensorArrayRef tensors); -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +@Namespace("torch::linalg::detail") public static native @ByRef Tensor multi_dot_out(@ByVal TensorArrayRef tensors, @ByRef Tensor result); -// This file contains classes which assist in desugaring Python style -// modules and their methods into flattened graphs which don't have any -// function calls. -// Map which stores filename to content. 
-// Targeting ../NamedJitModule.java +@Namespace("torch::linalg::detail") public static native @ByVal Tensor pinv(@Const @ByRef Tensor input, double rcond, @Cast("bool") boolean hermitian); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor pinv_out( + @ByRef Tensor result, + @Const @ByRef Tensor input, + double rcond, + @Cast("bool") boolean hermitian); -// Targeting ../NamedTensor.java +@Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensor_T qr( + @Const @ByRef Tensor input, + @ByVal @Cast("c10::string_view*") Pointer mode); +@Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer qr_out( + @ByRef Tensor Q, + @ByRef Tensor R, + @Const @ByRef Tensor input, + @ByVal @Cast("c10::string_view*") Pointer mode); -// Targeting ../NamedIValue.java +@Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensor_T solve_ex( + @Const @ByRef Tensor input, + @Const @ByRef Tensor other, + @Cast("bool") boolean left, + @Cast("bool") boolean check_errors); +@Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer solve_ex_out( + @ByRef Tensor result, + @ByRef Tensor info, + @Const @ByRef Tensor input, + @Const @ByRef Tensor other, + @Cast("bool") boolean left, + @Cast("bool") boolean check_errors); - // namespace detail -// Targeting ../JitModule.java +@Namespace("torch::linalg::detail") public static native @ByVal Tensor solve(@Const @ByRef Tensor input, @Const @ByRef Tensor other, @Cast("bool") boolean left); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor solve_out( + @ByRef Tensor result, + @Const @ByRef Tensor input, + @Const @ByRef Tensor other, + @Cast("bool") boolean left); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor solve_triangular( + @Const @ByRef Tensor input, + @Const @ByRef Tensor other, + @Cast("bool") boolean upper, + @Cast("bool") boolean left, + @Cast("bool") boolean unitriangular); -// C++ equivalent api of `torch.jit.freeze`. See documentation there for -// details. -@Namespace("torch::jit") public static native @ByVal JitModule freeze( - @Const @ByRef JitModule module, - @ByVal(nullValue = "c10::optional >(c10::nullopt)") StringVectorOptional preserved_attrs, - @Cast("bool") boolean optimize_numerics/*=true*/); -@Namespace("torch::jit") public static native @ByVal JitModule freeze( - @Const @ByRef JitModule module); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor solve_triangular_out( + @ByRef Tensor result, + @Const @ByRef Tensor input, + @Const @ByRef Tensor other, + @Cast("bool") boolean upper, + @Cast("bool") boolean left, + @Cast("bool") boolean unitriangular); -// C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation -// there for details. 
-@Namespace("torch::jit") public static native @ByVal JitModule optimize_for_inference( - @ByRef JitModule module, - @Const @ByRef(nullValue = "std::vector{}") StringVector other_methods); -@Namespace("torch::jit") public static native @ByVal JitModule optimize_for_inference( - @ByRef JitModule module); +@Namespace("torch::linalg::detail") public static native @ByVal T_TensorTensorTensor_T svd( + @Const @ByRef Tensor input, + @Cast("bool") boolean full_matrices, + @ByVal @Cast("c10::optional*") Pointer driver); -@Namespace("torch::jit") public enum FusionBehavior { STATIC(0), DYNAMIC(1); +@Namespace("torch::linalg::detail") public static native @ByVal @Cast("std::tuple*") PointerPointer svd_out( + @ByRef Tensor U, + @ByRef Tensor S, + @ByRef Tensor Vh, + @Const @ByRef Tensor input, + @Cast("bool") boolean full_matrices, + @ByVal @Cast("c10::optional*") Pointer driver); - public final int value; - private FusionBehavior(int v) { this.value = v; } - private FusionBehavior(FusionBehavior e) { this.value = e.value; } - public FusionBehavior intern() { for (FusionBehavior e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -// clang-format off -/* -Sets the type and number of specializations that can occur during fusion. +@Namespace("torch::linalg::detail") public static native @ByVal Tensor svdvals( + @Const @ByRef Tensor input, + @ByVal @Cast("c10::optional*") Pointer driver); -Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC -and depth is an integer. +@Namespace("torch::linalg::detail") public static native @ByRef Tensor svdvals_out( + @ByRef Tensor result, + @Const @ByRef Tensor input, + @ByVal @Cast("c10::optional*") Pointer driver); -Behavior - static vs dynamic: - In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined - based on some initial profiling runs. - In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple - shapes are possible. +@Namespace("torch::linalg::detail") public static native @ByVal Tensor tensorinv(@Const @ByRef Tensor self, @Cast("int64_t") long ind); -In both cases, we also recompile on new striding behavior, device, or dtype. +@Namespace("torch::linalg::detail") public static native @ByRef Tensor tensorinv_out(@ByRef Tensor result, @Const @ByRef Tensor self, @Cast("int64_t") long ind); -Behavior - fallback functions & depth: - When an input doesn't match the format required by the specialized compiled op, it will run - a fallback function. Fallback functions are recursively be compiled and specialized based - on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to - limit the number of specializations that can be compiled, before giving up on recompiling and - falling back to a completely un-fused, un-specialized implementation. +@Namespace("torch::linalg::detail") public static native @ByVal Tensor tensorsolve( + @Const @ByRef Tensor self, + @Const @ByRef Tensor other, + @ByVal LongArrayRefOptional dims); +@Namespace("torch::linalg::detail") public static native @ByVal Tensor tensorsolve( + @Const @ByRef Tensor self, + @Const @ByRef Tensor other, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -The list of (type, depth) pairs controls the type of specializations and the number of -specializations. 
For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first -two specializations will use static fusions, the following two specializations will use -dynamic fusion, and any inputs that satisfy none of the 4 options will run an -unfused implementation. +@Namespace("torch::linalg::detail") public static native @ByRef Tensor tensorsolve_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @Const @ByRef Tensor other, + @ByVal LongArrayRefOptional dims); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor tensorsolve_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @Const @ByRef Tensor other, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dims); -NB: in the future, if more as more fusion backends are added there may be more granular -apis for specific fusers. -*/ -// clang-format on -@Namespace("torch::jit") public static native @ByVal FusionStrategy getFusionStrategy(); -// returns previous strategy -@Namespace("torch::jit") public static native @ByVal FusionStrategy setFusionStrategy(@ByRef FusionStrategy fusion_strategy); -// Targeting ../SlotCursor.java +@Namespace("torch::linalg::detail") public static native @ByVal Tensor inv(@Const @ByRef Tensor input); +@Namespace("torch::linalg::detail") public static native @ByRef Tensor inv_out(@ByRef Tensor result, @Const @ByRef Tensor input); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** Cholesky decomposition +/** +/** See https://pytorch.org/docs/master/linalg.html#torch.linalg.cholesky +/** +/** Example: +/**
+/** <pre>{@code
+/** auto A = torch::randn({4, 4});
+/** A = torch::matmul(A, A.t());
+/** auto L = torch::linalg::cholesky(A);
+/** assert(torch::allclose(torch::matmul(L, L.t()), A));
+/** }</pre>
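+/**
+/** A Java sketch of the same computation; the method name linalg_cholesky is
+/** an assumption based on the corresponding ATen operator, and a static
+/** import of org.bytedeco.pytorch.global.torch is assumed:
+/** <pre>{@code
+/** Tensor A = randn(4, 4);
+/** A = matmul(A, A.t());
+/** Tensor L = linalg_cholesky(A);
+/** }</pre>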
*/ -// Targeting ../module_iterator.java +// C10_DEPRECATED_MESSAGE("linalg_det is deprecated, use det instead.") +/** See the documentation of torch.linalg.det */ -// Targeting ../named_module_iterator.java +/** Computes the sign and (natural) logarithm of the determinant + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.slogdet */ +/** Computes eigenvalues and eigenvectors of non-symmetric/non-hermitian + * matrices + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.eig */ -// Targeting ../parameter_iterator.java +/** Computes eigenvalues of non-symmetric/non-hermitian matrices + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigvals */ +/** Computes eigenvalues and eigenvectors + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigh */ -// Targeting ../named_parameter_iterator.java +/** Computes eigenvalues + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigvalsh */ +/** Computes the product of Householder matrices + * + * See + * https://pytorch.org/docs/master/linalg.html#torch.linalg.householder_product */ -// Targeting ../attribute_iterator.java +/** Computes the matrix exponential + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_exp */ +// C10_DEPRECATED_MESSAGE("linalg_norm is deprecated, use norm instead.") -// Targeting ../named_attribute_iterator.java +// C10_DEPRECATED_MESSAGE("linalg_norm is deprecated, use norm instead.") +// C10_DEPRECATED_MESSAGE("linalg_norm_out is deprecated, use norm_out +// instead.") -// Targeting ../buffer_iterator.java +// C10_DEPRECATED_MESSAGE("linalg_norm_out is deprecated, use norm_out +// instead.") +/** Computes the LU factorization with partial pivoting + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.lu_factor */ +@Namespace("torch::linalg") public static native @ByVal T_TensorTensor_T lu_factor( + @Const @ByRef Tensor input); -// Targeting ../named_buffer_iterator.java +/// +@Namespace("torch::linalg") public static native @ByVal @Cast("std::tuple*") PointerPointer lu_factor_out( + @ByRef Tensor LU, + @ByRef Tensor pivots, + @Const @ByRef Tensor self); -// Targeting ../module_list.java +/** Computes the LU factorization with partial pivoting + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.lu */ +@Namespace("torch::linalg") public static native @ByVal T_TensorTensorTensor_T lu( + @Const @ByRef Tensor input); +@Namespace("torch::linalg") public static native @ByVal @Cast("std::tuple*") PointerPointer lu_out( + @ByRef Tensor P, + @ByRef Tensor L, + @ByRef Tensor U, + @Const @ByRef Tensor self); -// Targeting ../named_module_list.java +@Namespace("torch::linalg") public static native @ByVal Tensor norm( + @Const @ByRef Tensor self, + @StdString BytePointer ord, + @ByVal LongArrayRefOptional opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); +@Namespace("torch::linalg") public static native @ByVal Tensor norm( + @Const @ByRef Tensor self, + @StdString String ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); + +@Namespace("torch::linalg") public static native @ByRef Tensor norm_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @StdString BytePointer ord, + @ByVal LongArrayRefOptional opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); +@Namespace("torch::linalg") public static native @ByRef Tensor 
norm_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @StdString String ord, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] opt_dim, + @Cast("bool") boolean keepdim, + @ByVal ScalarTypeOptional opt_dtype); +/** See https://pytorch.org/docs/master/linalg.html#torch.linalg.vector_norm */ -// Targeting ../parameter_list.java +/** See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_norm */ +/** See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_power */ -// Targeting ../named_parameter_list.java +/** See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_rank */ +/** See https://pytorch.org/docs/master/linalg.html#torch.linalg.multi_dot */ -// Targeting ../attribute_list.java +/** Computes the pseudo-inverse + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.pinv */ +@Namespace("torch::linalg") public static native @ByVal Tensor pinv( + @Const @ByRef Tensor input); -// Targeting ../named_attribute_list.java +/// +@Namespace("torch::linalg") public static native @ByRef Tensor pinv_out( + @ByRef Tensor result, + @Const @ByRef Tensor input); +/** Computes the QR decomposition + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.qr */ -// Targeting ../buffer_list.java +/** Computes the LDL decomposition + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.ldl_factor_ex */ +@Namespace("torch::linalg") public static native @ByVal T_TensorTensorTensor_T ldl_factor_ex( + @Const @ByRef Tensor input, + @Cast("bool") boolean hermitian, + @Cast("bool") boolean check_errors); -// Targeting ../named_buffer_list.java +/// +@Namespace("torch::linalg") public static native @ByVal @Cast("std::tuple*") PointerPointer ldl_factor_ex_out( + @ByRef Tensor LD, + @ByRef Tensor pivots, + @ByRef Tensor info, + @Const @ByRef Tensor input, + @Cast("bool") boolean hermitian, + @Cast("bool") boolean check_errors); +/** Solve a system of linear equations using the LDL decomposition + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.ldl_solve */ +@Namespace("torch::linalg") public static native @ByVal Tensor ldl_solve( + @Const @ByRef Tensor LD, + @Const @ByRef Tensor pivots, + @Const @ByRef Tensor B, + @Cast("bool") boolean hermitian); -// Targeting ../ModulePolicy.java +/// +@Namespace("torch::linalg") public static native @ByRef Tensor ldl_solve_out( + @ByRef Tensor result, + @Const @ByRef Tensor LD, + @Const @ByRef Tensor pivots, + @Const @ByRef Tensor B, + @Cast("bool") boolean hermitian); + +/** Solves a system linear system AX = B + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.solve_ex */ -// Targeting ../ParameterPolicy.java +/** Computes a tensor {@code x} such that {@code matmul(input, x) = other}. 
+ * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.solve */ +/** Computes a solution of a linear system AX = B for input = A and other = B + * whenever A is square upper or lower triangular and does not have zeros in + * the diagonal + * + * See + * https://pytorch.org/docs/master/linalg.html#torch.linalg.solve_triangular */ -// Targeting ../BufferPolicy.java +/** Computes the singular values and singular vectors + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.svd */ +/** Computes the singular values + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.svdvals */ -// Targeting ../AttributePolicy.java +/** Computes the inverse of a tensor + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.tensorinv + * + * Example: + *
+ *  <pre>{@code
+ *  auto a = torch::eye(4*6).reshape({4, 6, 8, 3});
+ *  int64_t ind = 2;
+ *  auto ainv = torch::linalg::tensorinv(a, ind);
+ *  }</pre>
*/ + +/** Computes a tensor {@code x} such that {@code tensordot(input, x, dims=x.dim()) = other}. + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.tensorsolve + * + * Example: + *
+ *  <pre>{@code
+ *  auto a = torch::eye(2*3*4).reshape({2*3, 4, 2, 3, 4});
+ *  auto b = torch::randn({2*3, 4});
+ *  auto x = torch::linalg::tensorsolve(a, b);
+ *  }</pre>
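+ *
+ *  A Java sketch of the same call (assumptions: a static import of
+ *  org.bytedeco.pytorch.global.torch, the generated long... overloads, and
+ *  the ATen-derived name linalg_tensorsolve):
+ *  <pre>{@code
+ *  Tensor a = eye(2 * 3 * 4).reshape(2 * 3, 4, 2, 3, 4);
+ *  Tensor b = randn(2 * 3, 4);
+ *  Tensor x = linalg_tensorsolve(a, b);
+ *  }</pre>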
*/ +/** Computes a tensor {@code inverse_input} such that {@code dot(input, inverse_input) = + * eye(input.size(0))}. + * + * See https://pytorch.org/docs/master/linalg.html#torch.linalg.inv */ -// Targeting ../NamedModulePolicy.java + // namespace linalg + // namespace torch -// Targeting ../NamedParameterPolicy.java +// Parsed from torch/nested.h +// #pragma once -// Targeting ../NamedAttributePolicy.java +// #include +// #include +// #include +// #include +/** Nested tensor + * + * See + * https://pytorch.org/docs/master/nested.html#torch.nested.nested_tensor + * + *
+ *  <pre>{@code */
+// implemented on python object to allow torch.nested.nested_tensor to be
+// constructed with arbitrarily nested python objects - for now, only arbitrary
+// python lists and lists of Tensors
+// See torch/csrc/autograd/python_nested_functions_manual.cpp for Python
+// implementation
+// See here for C++ implementation
+@Namespace("torch::nested") public static native @ByVal Tensor nested_tensor(
+    @ByVal @Cast("at::TensorList*") TensorArrayRef nested_tensor_data,
+    @Const @ByRef(nullValue = "at::TensorOptions{}") TensorOptions options);
+@Namespace("torch::nested") public static native @ByVal Tensor nested_tensor(
+    @ByVal @Cast("at::TensorList*") TensorArrayRef nested_tensor_data);
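+// A usage sketch from Java (assumptions: a static import of
+// org.bytedeco.pytorch.global.torch, the generated long... overload of randn,
+// and a TensorArrayRef constructor taking a TensorVector, as the presets'
+// ArrayRef wrappers usually provide):
+//
+//   Tensor a = randn(2, 5);
+//   Tensor b = randn(3, 5);
+//   Tensor nt = nested_tensor(new TensorArrayRef(new TensorVector(a, b)));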
 
-// Targeting ../NamedBufferPolicy.java
 
+///
+///
+@Namespace("torch::nested") public static native @ByVal Tensor nested_tensor(
+    @ByVal @Cast("at::ArrayRef*") Pointer nested_tensor_data,
+    @Const @ByRef(nullValue = "at::TensorOptions{}") TensorOptions options);
+@Namespace("torch::nested") public static native @ByVal Tensor nested_tensor(
+    @ByVal @Cast("at::ArrayRef*") Pointer nested_tensor_data);
 
+/** As Nested Tensor
+ * 
+ *  See
+ *  https://pytorch.org/docs/master/nested.html#torch.nested.as_nested_tensor
+ * 
+ *  
+ *  <pre>{@code */
 
- // namespace detail
+///
+///
+@Namespace("torch::nested") public static native @ByVal Tensor as_nested_tensor(
+    @ByVal @Cast("at::TensorList*") TensorArrayRef list,
+    @ByVal(nullValue = "c10::optional(c10::nullopt)") ScalarTypeOptional dtype,
+    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+@Namespace("torch::nested") public static native @ByVal Tensor as_nested_tensor(
+    @ByVal @Cast("at::TensorList*") TensorArrayRef list);
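+// Same construction pattern as nested_tensor above; as_nested_tensor is the
+// variant that preserves the autograd history of the input tensors (a sketch,
+// under the same assumptions as the nested_tensor sketch):
+//
+//   Tensor nt2 = as_nested_tensor(new TensorArrayRef(new TensorVector(a, b)));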
 
-@Namespace("torch::jit") public static native @Cast("bool*") @ByRef BoolPointer getInlineEverythingMode();
-// We once had a `script::` namespace that was deleted. This is for backcompat
-// of the public API; new code should not use this type alias.
- // namespace script
+/** Nested to padded tensor
+ * 
+ *  See
+ *  https://pytorch.org/docs/master/nested.html#torch.nested.to_padded_tensor
+ * 
+ *  
+ *  <pre>{@code */
+@Namespace("torch::nested") public static native @ByVal Tensor to_padded_tensor(
+    @Const @ByRef Tensor self,
+    double padding,
+    @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional output_size);
+@Namespace("torch::nested") public static native @ByVal Tensor to_padded_tensor(
+    @Const @ByRef Tensor self,
+    double padding);
+@Namespace("torch::nested") public static native @ByVal Tensor to_padded_tensor(
+    @Const @ByRef Tensor self,
+    double padding,
+    @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
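+// A sketch of padding out the ragged dimension, with nt being the nested
+// tensor from the sketch further up (same assumptions):
+//
+//   Tensor padded = to_padded_tensor(nt, 0.0);            // pad to max shape, here 2x3x5
+//   Tensor padded2 = to_padded_tensor(nt, 0.0, 2, 3, 5);  // explicit output size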
 
- // namespace jit
+ // namespace nested
  // namespace torch
 
 
-// Parsed from torch/csrc/jit/serialization/source_range_serialization.h
+// Parsed from torch/detail/static.h
 
 // #pragma once
 
-// #include 
-// #include 
+// #include 
+// #include 
 
-// #include 
+// #include 
+// #include 
+ // namespace nn
+ // namespace torch
+/** Detects if a type T has a forward() method. */
 
-// #include 
-// #include 
 
-// Targeting ../SourceRangeSerializer.java
 
 
-@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kByteOffsetIndex();
-public static final long kByteOffsetIndex = kByteOffsetIndex();
-@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kSourceRangeIndex();
-public static final long kSourceRangeIndex = kSourceRangeIndex();
-@Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kSourceRangeTagIndex();
-public static final long kSourceRangeTagIndex = kSourceRangeTagIndex();
-@Namespace("torch::jit") @MemberGetter public static native @ByRef @Cast("const c10::string_view*") Pointer kFormatWithStringTable();
-// Targeting ../SourceRangePickler.java
 
+/** A type trait whose {@code value} member is true if {@code M} derives from {@code Module}. */
+ // namespace detail
+ // namespace torch
 
-// Targeting ../SourceRangeDeserializer.java
 
+// Parsed from torch/csrc/api/include/torch/nn/pimpl-inl.h
 
-// Targeting ../SourceRangeUnpickler.java
+// This class exists only to do SFINAE on abstract types `T` that are really
+// `ModuleHolder`s, because there's no good way to say that `T` is a
+// `ModuleHolder` over some unknown type `ModuleType`. With this, you can do
+// `enable_if_t<is_module_holder<T>::value>`.
 
+// A type trait that is true for types that are `ModuleHolder`s.
 
+// A collection of templates that answer the question whether a type `T` is a
+// `ModuleHolder`, and if so whether its contained type is of type `C`. This is
+// tricky because it is hard to short circuit in template metaprogramming. A
+// naive and incorrect solution to this problem would be something like
+// `disable_if<is_module_holder<T>::value && typename T::ContainedType == C>`.
+// This would disable all types that are not `ModuleHolder`s, because even
+// though the `is_module_holder<T>::value` may be `false` for such types the
+// `T::ContainedType` access would be ill-formed and thus fail the whole
+// expression by the rules of SFINAE. Instead we have to use template
+// specialization to statically branch on the first condition
+// (`is_module_holder`) and are only then allowed to query
+// `T::ContainedType` in the branch for which the condition was true.
 
-@Namespace("torch::jit") public static native void setShouldUseFormatWithStringTable(
-    @Cast("bool") boolean should_use_format_with_string_table);
+// Base template.
 
- // namespace jit
- // namespace torch
+// False branch. `T` is not a `ModuleHolder` and thus not a `ModuleHolder` with
+// contained type `C`.
 
+// True branch. `T` is a `ModuleHolder` and thus we can legit access its
+// `ContainedType` and compare it against `C`.
 
-// Parsed from torch/csrc/jit/serialization/pickler.h
+// Helper template.
 
-// #pragma once
+// A collection of templates that allow deducing the return type of the
+// `forward()` method, but only if a module actually has a `forward()` method,
+// and otherwise deduces to the type `void`.
 
-// #include 
-// #include 
-// #include 
-// #include 
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
+// Parsed from torch/nn/pimpl.h
 
-// See Python's pickletools.py for a detailed description of each of these codes
-@Namespace("torch::jit") public enum PickleOpCode {
-  MARK((byte)('(')),
-  STOP((byte)('.')),
-  POP((byte)('0')),
-  POP_MARK((byte)('1')),
-  DUP((byte)('2')),
-  FLOAT((byte)('F')),
-  INT((byte)('I')),
-  BININT((byte)('J')),
-  BININT1((byte)('K')),
-  LONG((byte)('L')),
-  BININT2((byte)('M')),
-  NONE((byte)('N')),
-  PERSID((byte)('P')),
-  BINPERSID((byte)('Q')),
-  REDUCE((byte)('R')),
-  STRING((byte)('S')),
-  BINSTRING((byte)('T')),
-  SHORT_BINSTRING((byte)('U')),
-  // NB: Avoid using UNICODE as it is a macro in the Windows API
-  UNICODE_((byte)('V')),
-  BINUNICODE((byte)('X')),
-  APPEND((byte)('a')),
-  BUILD((byte)('b')),
-  GLOBAL((byte)('c')),
-  DICT((byte)('d')),
-  EMPTY_DICT((byte)('}')),
-  APPENDS((byte)('e')),
-  GET((byte)('g')),
-  BINGET((byte)('h')),
-  INST((byte)('i')),
-  LONG_BINGET((byte)('j')),
-  LIST((byte)('l')),
-  EMPTY_LIST((byte)(']')),
-  OBJ((byte)('o')),
-  PUT((byte)('p')),
-  BINPUT((byte)('q')),
-  LONG_BINPUT((byte)('r')),
-  SETITEM((byte)('s')),
-  TUPLE((byte)('t')),
-  EMPTY_TUPLE((byte)(')')),
-  SETITEMS((byte)('u')),
-  BINFLOAT((byte)('G')),
+// #pragma once
 
-  // Protocol 2
-  PROTO((byte)(0x80)),
-  NEWOBJ((byte)(0x81)),
-  EXT1((byte)(0x82)),
-  EXT2((byte)(0x83)),
-  EXT4((byte)(0x84)),
-  TUPLE1((byte)(0x85)),
-  TUPLE2((byte)(0x86)),
-  TUPLE3((byte)(0x87)),
-  NEWTRUE((byte)(0x88)),
-  NEWFALSE((byte)(0x89)),
-  LONG1((byte)(0x8a)),
-  LONG4((byte)(0x8b)),
+// #include 
+// #include 
+// #include 
+// #include 
 
-  // Protocol 3 (Python 3.x)
-  BINBYTES((byte)('B')),
-  SHORT_BINBYTES((byte)('C')),
+// #include 
 
-  // Protocol 4
-  SHORT_BINUNICODE((byte)(0x8c)),
-  BINUNICODE8((byte)(0x8d)),
-  BINBYTES8((byte)(0x8e)),
-  EMPTY_SET((byte)(0x8f)),
-  ADDITEMS((byte)(0x90)),
-  FROZENSET((byte)(0x91)),
-  NEWOBJ_EX((byte)(0x92)),
-  STACK_GLOBAL((byte)(0x93)),
-  MEMOIZE((byte)(0x94)),
-  FRAME((byte)(0x95));
+// #include 
+// #include 
+// #include 
+// Dump all the template metaprogramming in this file.
+// #include 
+ // namespace detail
 
-    public final byte value;
-    private PickleOpCode(byte v) { this.value = v; }
-    private PickleOpCode(PickleOpCode e) { this.value = e.value; }
-    public PickleOpCode intern() { for (PickleOpCode e : values()) if (e.value == value) return e; return this; }
-    @Override public String toString() { return intern().name(); }
-}
-// Targeting ../WriteableTensorData.java
+/** A {@code ModuleHolder} is essentially a wrapper around {@code std::shared_ptr<M>} where
+ *  {@code M} is an {@code nn::Module} subclass, with convenient constructors defined for
+ *  the kind of constructions we want to allow for our modules. */
 
+/** Pretty prints the given {@code Module} into the {@code ostream}. */
 
+/** Serializes a {@code ModuleHolder} into an {@code OutputArchive}. */
 
+/** Deserializes a {@code ModuleHolder} from an {@code InputArchive}. */
 
+ // namespace nn
+ // namespace torch
 
-// Targeting ../Pickler.java
+// Workaround for CUDA 10.2 and below not allowing attribute unused on
+// using declarations.
+// #ifdef __CUDACC__
+// #define TORCH_UNUSED_EXCEPT_CUDA
+// #else
+// #define TORCH_UNUSED_EXCEPT_CUDA C10_UNUSED
+// #endif
 
+/** Defines a class {@code Name} which inherits from {@code nn::ModuleHolder} to provide a
+ *  wrapper over a {@code std::shared_ptr<ImplType>}.
+ *  {@code Impl} is a type alias for {@code ImplType} which provides a way to call static
+ *  method of {@code ImplType}. */
+// #define TORCH_MODULE_IMPL(Name, ImplType)
+//   class Name : public torch::nn::ModuleHolder<ImplType> { /* NOLINT */
+//    public:
+//     using torch::nn::ModuleHolder<ImplType>::ModuleHolder;
+//     using Impl TORCH_UNUSED_EXCEPT_CUDA = ImplType;
+//   }
 
+/** Like {@code TORCH_MODULE_IMPL}, but defaults the {@code ImplType} name to {@code Impl}. */
+// #define TORCH_MODULE(Name) TORCH_MODULE_IMPL(Name, Name##Impl)
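+// The presets map the *Impl classes directly rather than mirroring the
+// TORCH_MODULE holder wrappers on the Java side. A minimal sketch (assuming
+// the usual generated constructors and a static import of
+// org.bytedeco.pytorch.global.torch):
+//
+//   LinearImpl fc = new LinearImpl(784, 64);
+//   Tensor y = fc.forward(randn(1, 784));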
 
-// returns a (tensor, record_size) for a tensor, converting it to a CPU tensor
-// if it was CUDA and to_cpu is True.
-@Namespace("torch::jit") public static native @ByVal WriteableTensorData getWriteableTensorData(@Const @ByRef Tensor tensor, @Cast("bool") boolean to_cpu/*=true*/);
-@Namespace("torch::jit") public static native @ByVal WriteableTensorData getWriteableTensorData(@Const @ByRef Tensor tensor);
 
-// return the value of the tensor's storage pointer
+// Parsed from torch/nn/modules/container/any_value.h
 
+// #pragma once
 
-// if the cls has __getstate__/__setstate__
-// assert they have the right schema and return true,
-// otherwise return false
+// #include 
+// #include 
+// #include 
+// #include 
 
+// #include 
+// #include 
+// #include 
 
-// Return a map of Tensor Metadata for serialization.
-// For now, it only takes care of `conj` and `neg` bit.
-@Namespace("torch::jit") public static native @ByVal StringBoolMap getTensorMetadata(
-    @Const @ByRef Tensor t);
+// #include 
+// #include 
+// #include 
+// #include 
+// Targeting ../AnyValue.java
 
-// set Tensor Metadata based on the map.
-// Refer: getTensorMathdata
-@Namespace("torch::jit") public static native void setTensorMetadata(
-    @Const @ByRef Tensor t,
-    @ByVal StringBoolMap metadata);
 
-// set Tensor metadata based on the map.
-// NOTE: This overload is required by unpickler.cpp
-@Namespace("torch::jit") public static native void setTensorMetadata(
-    @Const @ByRef Tensor t,
-    @ByVal GenericDict metadata_idict);
 
- // namespace jit
+ // namespace nn
  // namespace torch
 
 
-// Parsed from torch/csrc/jit/serialization/unpickler.h
+// Parsed from torch/nn/modules/container/any_module_holder.h
 
 // #pragma once
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// Targeting ../Unpickler.java
+// #include 
 
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModulePlaceholder ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
+/** The static type of the object we store in the {@code AnyModule}, which erases
+ *  the actual type, but allows us to call {@code forward()} on the underlying
+ *  module. */
 
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModuleHolder ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
+/** The dynamic type of the object stored in the {@code AnyModule}. It contains the
+ *  concrete instance to which all calls are forwarded. It is parameterized
+ *  over the concrete type of the module, and the types of the arguments the
+ *  module takes in its {@code forward()} method. */
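+// A sketch of type-erased usage from Java (assumptions: a generated AnyModule
+// constructor for the concrete module type, a Tensor-based forward overload,
+// and a static import of org.bytedeco.pytorch.global.torch):
+//
+//   AnyModule any = new AnyModule(new LinearImpl(3, 4));
+//   Tensor y = any.forward(randn(2, 3));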
 
- // namespace jit
+ // namespace nn
  // namespace torch
 
 
-// Parsed from torch/csrc/jit/serialization/import.h
+// Parsed from torch/ordered_dict.h
 
 // #pragma once
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
+// #include 
+// #include 
+// #include 
+// #include 
+// #include 
+// #include 
+/** An ordered dictionary implementation, akin to Python's {@code OrderedDict}. */
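+// The presets instantiate this template for the combinations the C++ API
+// needs; for example (a sketch, assuming the StringTensorDict mapping and an
+// existing Module instance m; the method names are assumptions):
+//
+//   StringTensorDict params = m.named_parameters();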
 
-// #include 
-// Targeting ../ReadAdapterInterface.java
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict::Item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
- // namespace serialize
- // namespace caffe2
 
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @StdString BytePointer filename,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @StdString BytePointer filename);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @StdString String filename,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @StdString String filename);
 
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @Cast("std::istream*") @ByRef Pointer in,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @Cast("std::istream*") @ByRef Pointer in);
 
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @UniquePtr ReadAdapterInterface rai,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @UniquePtr ReadAdapterInterface rai);
 
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @StdString BytePointer filename,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files,
-    @Cast("bool") boolean load_debug_files/*=true*/,
-    @Cast("bool") boolean restore_shapes/*=false*/);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @StdString BytePointer filename,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @StdString String filename,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files,
-    @Cast("bool") boolean load_debug_files/*=true*/,
-    @Cast("bool") boolean restore_shapes/*=false*/);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @StdString String filename,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files);
 
-// For reading unified serialization format from torch.Package
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @ByVal @Cast("std::shared_ptr*") Pointer reader,
-    @SharedPtr DeserializationStorageContext storage_context,
-    @ByVal DeviceOptional device,
-    @StdString BytePointer ts_id);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @ByVal @Cast("std::shared_ptr*") Pointer reader,
-    @SharedPtr DeserializationStorageContext storage_context,
-    @ByVal DeviceOptional device,
-    @StdString String ts_id);
 
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @Cast("std::istream*") @ByRef Pointer in,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files,
-    @Cast("bool") boolean load_debug_files/*=true*/,
-    @Cast("bool") boolean restore_shapes/*=false*/);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @Cast("std::istream*") @ByRef Pointer in,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files);
 
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @UniquePtr ReadAdapterInterface rai,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module(
-    @SharedPtr CompilationUnit cu,
-    @UniquePtr ReadAdapterInterface rai,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files);
 
-/** Loads a serialized {@code Module} from the given {@code istream}.
- * 
- *  The istream must contain a serialized {@code Module}, exported via
- *  {@code torch::jit::ExportModule} in C++. */
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @Cast("std::istream*") @ByRef Pointer in,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @Cast("std::istream*") @ByRef Pointer in);
 
 
-///
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @Cast("std::istream*") @ByRef Pointer in,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @Cast("std::istream*") @ByRef Pointer in,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files);
 
-/** Loads a serialized {@code Module} from the given {@code filename}.
- * 
- *  The file stored at the location given in {@code filename} must contain a
- *  serialized {@code Module}, exported either via {@code ScriptModule.save()} in
- *  Python or {@code torch::jit::ExportModule} in C++. */
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @StdString BytePointer filename,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @StdString BytePointer filename);
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @StdString String filename,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @StdString String filename);
 
 
-///
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @StdString BytePointer filename,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @StdString BytePointer filename,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files);
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @StdString String filename,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @StdString String filename,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files);
 
-/** Loads a serialized {@code Module} from the given shared_ptr {@code rai}.
- * 
- *  The reader adapter, which is for customized input stream, must contain a
- *  serialized {@code Module}, exported either via {@code ScriptModule.save()} in
- *  Python or {@code torch::jit::ExportModule} in C++. */
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @SharedPtr ReadAdapterInterface rai,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @SharedPtr ReadAdapterInterface rai);
 
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @SharedPtr ReadAdapterInterface rai,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files,
-    @Cast("bool") boolean load_debug_files/*=true*/);
-@Namespace("torch::jit") public static native @ByVal JitModule load(
-    @SharedPtr ReadAdapterInterface rai,
-    @ByVal DeviceOptional device,
-    @ByRef ExtraFilesMap extra_files);
 
-@Namespace("torch::jit") public static native @ByVal JitModule jitModuleFromSourceAndConstants(
-    @Const @ByRef IValue ivalue,
-    @Const @ByRef ExtraFilesMap source,
-    @Const @ByRef IValueVector constants,
-    int version);
 
-@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module(
-    @Cast("char*") @SharedPtr BytePointer data,
-    @Cast("size_t") long size,
-    @ByRef ExtraFilesMap extra_files,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
-@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module(
-    @Cast("char*") @SharedPtr BytePointer data,
-    @Cast("size_t") long size,
-    @ByRef ExtraFilesMap extra_files);
-@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module(
-    @Cast("char*") @SharedPtr ByteBuffer data,
-    @Cast("size_t") long size,
-    @ByRef ExtraFilesMap extra_files,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
-@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module(
-    @Cast("char*") @SharedPtr ByteBuffer data,
-    @Cast("size_t") long size,
-    @ByRef ExtraFilesMap extra_files);
-@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module(
-    @Cast("char*") @SharedPtr byte[] data,
-    @Cast("size_t") long size,
-    @ByRef ExtraFilesMap extra_files,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
-@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module(
-    @Cast("char*") @SharedPtr byte[] data,
-    @Cast("size_t") long size,
-    @ByRef ExtraFilesMap extra_files);
 
-@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file(
-    @StdString BytePointer filename,
-    @ByRef ExtraFilesMap extra_files,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
-@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file(
-    @StdString BytePointer filename,
-    @ByRef ExtraFilesMap extra_files);
-@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file(
-    @StdString String filename,
-    @ByRef ExtraFilesMap extra_files,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
-@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file(
-    @StdString String filename,
-    @ByRef ExtraFilesMap extra_files);
 
-@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_stream(
-    @Cast("std::istream*") @ByRef Pointer in,
-    @ByRef ExtraFilesMap extra_files,
-    @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
-@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_stream(
-    @Cast("std::istream*") @ByRef Pointer in,
-    @ByRef ExtraFilesMap extra_files);
 
- // namespace jit
- // namespace torch
 
 
-// Parsed from torch/csrc/jit/serialization/pickle.h
 
-// #pragma once
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
 
-/** Pickle an IValue by calling a function to handle writing the data.
- * 
- *  {@code writer} is a function that takes in a pointer to a chunk of memory and its
- *  size and consumes it.
- * 
- *  See {@code jit::pickle} for more details. */
 
-///
-///
-///
-///
-///
-///
-///
-///
-@Namespace("torch::jit") public static native void pickle(
-    @ByVal Writer writer,
-    @Const @ByRef IValue ivalue,
-    TensorVector tensor_table/*=nullptr*/);
-@Namespace("torch::jit") public static native void pickle(
-    @ByVal Writer writer,
-    @Const @ByRef IValue ivalue);
 
-/** Save a {@code torch::IValue} in a format compatible with Python's {@code pickle} module
- * 
- *  If present, {@code tensor_table} is a pointer to a table in which tensors that
- *  are contained within {@code ivalue} are stored, and the bytes returned by the
- *  pickler will only include references to these tensors in the table. This can
- *  be used to keep the binary blob size small.
- *  If not provided, tensors are stored in the same byte stream as the pickle
- *  data, similar to {@code torch.save()} in eager Python.
- * 
- *  Pickled values can be loaded in Python and C++:
- *  \rst
- *  .. code-block:: cpp
- * 
- *   torch::IValue float_value(2.3);
- * 
- *   // TODO: when tensors are stored in the pickle, delete this
- *   std::vector tensor_table;
- *   auto data = torch::jit::pickle(float_value, &tensor_table);
- * 
- *   std::vector ivalues =
- *       torch::jit::unpickle(data.data(), data.size());
- * 
- *  .. code-block:: python
- * 
- *    values = torch.load('data.pkl')
- *    print(values)
- * 
- *  \endrst */
-@Namespace("torch::jit") public static native @Cast("char*") @StdVector BytePointer pickle(
-    @Const @ByRef IValue ivalue,
-    TensorVector tensor_table/*=nullptr*/);
-@Namespace("torch::jit") public static native @Cast("char*") @StdVector BytePointer pickle(
-    @Const @ByRef IValue ivalue);
 
-/** Save a {@code torch::IValue} in a format that can be loaded by both
- *  {@code torch::pickle_load} in C++ and {@code torch.load} in Python. */
-@Namespace("torch::jit") public static native @Cast("char*") @StdVector BytePointer pickle_save(@Const @ByRef IValue ivalue);
 
-/** Deserialize a {@code torch::IValue} from bytes produced by either
- *  {@code torch::pickle_save} in C++ or {@code torch.save} in Python */
-@Namespace("torch::jit") public static native @ByVal IValue pickle_load(@Cast("char*") @StdVector BytePointer data);
-@Namespace("torch::jit") public static native @ByVal IValue pickle_load(@Cast("char*") @StdVector ByteBuffer data);
-@Namespace("torch::jit") public static native @ByVal IValue pickle_load(@Cast("char*") @StdVector byte[] data);
-// Targeting ../TypeParser.java
 
 
 
-///
-///
-@Namespace("torch::jit") public static native @ByVal IValue unpickle(
-    @ByVal Reader reader,
-    @ByVal @Cast("torch::jit::TypeResolver*") Pointer type_resolver,
-    @ByVal TensorArrayRef tensor_table,
-    TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/);
-@Namespace("torch::jit") public static native @ByVal IValue unpickle(
-    @ByVal Reader reader,
-    @ByVal @Cast("torch::jit::TypeResolver*") Pointer type_resolver,
-    @ByVal TensorArrayRef tensor_table);
-
-/** Decode a chunk of memory containing pickled data into its {@code torch::IValue}s.
- * 
- *  If any {@code torch::IValue}s in the pickled data are {@code Object}s, then a
- *  {@code class_resolver} function must be provided.
- * 
- *  See {@code torch::pickle} for details. */
-@Namespace("torch::jit") public static native @ByVal IValue unpickle(
-    @Cast("const char*") BytePointer data,
-    @Cast("size_t") long size,
-    @ByVal(nullValue = "torch::jit::TypeResolver(nullptr)") @Cast("torch::jit::TypeResolver*") Pointer type_resolver,
-    @ByVal(nullValue = "c10::ArrayRef{}") TensorArrayRef tensor_table,
-    TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/);
-@Namespace("torch::jit") public static native @ByVal IValue unpickle(
-    @Cast("const char*") BytePointer data,
-    @Cast("size_t") long size);
-@Namespace("torch::jit") public static native @ByVal IValue unpickle(
-    String data,
-    @Cast("size_t") long size,
-    @ByVal(nullValue = "torch::jit::TypeResolver(nullptr)") @Cast("torch::jit::TypeResolver*") Pointer type_resolver,
-    @ByVal(nullValue = "c10::ArrayRef{}") TensorArrayRef tensor_table,
-    TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/);
-@Namespace("torch::jit") public static native @ByVal IValue unpickle(
-    String data,
-    @Cast("size_t") long size);
 
- // namespace jit
- // namespace torch
 
 
-// Parsed from torch/csrc/jit/serialization/python_print.h
 
-// #pragma once
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// Targeting ../PythonPrintImpl.java
 
 
-// Targeting ../PrintDepsTable.java
 
 
-// Targeting ../PythonPrint.java
 
 
 
 
 
-@Namespace("torch::jit") public static native void jitModuleToPythonCodeAndConstants(
-    @Const @ByRef JitModule module,
-    ExtraFilesMap jit_sources,
-    IValueVector constants
-);
 
- // namespace jit
- // namespace torch
 
 
-// Parsed from torch/csrc/jit/serialization/type_name_uniquer.h
 
-// #pragma once
 
-// #include 
-// #include 
-// Targeting ../TypeNameUniquer.java
 
 
- // namespace jit
- // namespace torch
 
 
-// Parsed from torch/csrc/jit/serialization/storage_context.h
 
-// #pragma once
 
-// #include 
-// Targeting ../SerializationStorageContext.java
 
 
-// Targeting ../DeserializationStorageContext.java
 
 
 
- // namespace jit
- // namespace torch
 
 
-// Parsed from torch/csrc/jit/serialization/export.h
 
-// #pragma once
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
 
+ // namespace torch
 
-// This map is used to keep track of parameters that should be exported
-// externally. When `defer_weight_export` is true, the returned map contains
-// kv pairs that map {external reference name} -> {at::Tensor to be exported}.
-// It is the responsibility of the caller to export these appropriately.
-//
-// For example, when exporting to a zip archive, the caller may write out files
-// for each entry in the export map, with the filename being the key and the
-// file contents being the raw tensor data.
-
-// Used for modularized export settling function and node attributes.
-
-
-
-
-
-@Namespace("torch::jit") public static native void check_onnx_proto(@StdString BytePointer proto_string);
-@Namespace("torch::jit") public static native void check_onnx_proto(@StdString String proto_string);
-// Targeting ../ScriptModuleSerializer.java
-
-
-
-// For testing purposes
-@Namespace("torch::jit") public static native @StdString BytePointer pretty_print_onnx(
-    @Const @SharedPtr @ByRef Graph graph,
-    @Const @ByRef StringTensorMap initializers,
-    @Cast("int64_t") long onnx_opset_version,
-    @Cast("bool") boolean defer_weight_export,
-    OperatorExportTypes operator_export_type/*=torch::onnx::OperatorExportTypes::ONNX*/,
-    @Cast("bool") boolean google_printer/*=false*/,
-    @Cast("bool") boolean keep_initializers_as_inputs/*=true*/,
-    @Const @ByRef(nullValue = "std::map{}") StringIntMap custom_opsets,
-    @Cast("bool") boolean add_node_names/*=true*/);
-@Namespace("torch::jit") public static native @StdString BytePointer pretty_print_onnx(
-    @Const @SharedPtr @ByRef Graph graph,
-    @Const @ByRef StringTensorMap initializers,
-    @Cast("int64_t") long onnx_opset_version,
-    @Cast("bool") boolean defer_weight_export);
-@Namespace("torch::jit") public static native @StdString String pretty_print_onnx(
-    @Const @SharedPtr @ByRef Graph graph,
-    @Const @ByRef StringTensorMap initializers,
-    @Cast("int64_t") long onnx_opset_version,
-    @Cast("bool") boolean defer_weight_export,
-    @Cast("torch::onnx::OperatorExportTypes") int operator_export_type/*=torch::onnx::OperatorExportTypes::ONNX*/,
-    @Cast("bool") boolean google_printer/*=false*/,
-    @Cast("bool") boolean keep_initializers_as_inputs/*=true*/,
-    @Const @ByRef(nullValue = "std::map{}") StringIntMap custom_opsets,
-    @Cast("bool") boolean add_node_names/*=true*/);
-
-@Namespace("torch::jit") public static native void ExportModule(
-    @Const @ByRef JitModule module,
-    @Cast("std::ostream*") @ByRef Pointer out,
-    @Const @ByRef(nullValue = "torch::jit::ExtraFilesMap()") ExtraFilesMap metadata,
-    @Cast("bool") boolean bytecode_format/*=false*/,
-    @Cast("bool") boolean save_mobile_debug_info/*=false*/,
-    @Cast("bool") boolean use_flatbuffer/*=false*/);
-@Namespace("torch::jit") public static native void ExportModule(
-    @Const @ByRef JitModule module,
-    @Cast("std::ostream*") @ByRef Pointer out);
 
-@Namespace("torch::jit") public static native void ExportModule(
-    @Const @ByRef JitModule module,
-    @StdString BytePointer filename,
-    @Const @ByRef(nullValue = "torch::jit::ExtraFilesMap()") ExtraFilesMap metadata,
-    @Cast("bool") boolean bytecode_format/*=false*/,
-    @Cast("bool") boolean save_mobile_debug_info/*=false*/,
-    @Cast("bool") boolean use_flatbuffer/*=false*/);
-@Namespace("torch::jit") public static native void ExportModule(
-    @Const @ByRef JitModule module,
-    @StdString BytePointer filename);
-@Namespace("torch::jit") public static native void ExportModule(
-    @Const @ByRef JitModule module,
-    @StdString String filename,
-    @Const @ByRef(nullValue = "torch::jit::ExtraFilesMap()") ExtraFilesMap metadata,
-    @Cast("bool") boolean bytecode_format/*=false*/,
-    @Cast("bool") boolean save_mobile_debug_info/*=false*/,
-    @Cast("bool") boolean use_flatbuffer/*=false*/);
-@Namespace("torch::jit") public static native void ExportModule(
-    @Const @ByRef JitModule module,
-    @StdString String filename);
+// Parsed from torch/nn/module.h
 
-@Namespace("torch::jit") public static native void ExportModule(
-    @Const @ByRef JitModule module,
-    @Const @ByRef WriteFunction writer_func,
-    @Const @ByRef(nullValue = "torch::jit::ExtraFilesMap()") ExtraFilesMap metadata,
-    @Cast("bool") boolean bytecode_format/*=false*/,
-    @Cast("bool") boolean save_mobile_debug_info/*=false*/,
-    @Cast("bool") boolean use_flatbuffer/*=false*/);
-@Namespace("torch::jit") public static native void ExportModule(
-    @Const @ByRef JitModule module,
-    @Const @ByRef WriteFunction writer_func);
+// #pragma once
 
-// Write the bytes of a pickle archive and the tensors referenced inside that
-// archive
-@Namespace("torch::jit") public static native void writeArchiveAndTensors(
-    @StdString BytePointer archive_name,
-    @Cast("const char*") BytePointer pickle_bytes,
-    @Cast("size_t") long size,
-    @Cast({"", "std::vector"}) @StdMove TensorVector tensors,
-    @Cast("caffe2::serialize::PyTorchStreamWriter*") @ByRef Pointer out);
-@Namespace("torch::jit") public static native void writeArchiveAndTensors(
-    @StdString String archive_name,
-    String pickle_bytes,
-    @Cast("size_t") long size,
-    @Cast({"", "std::vector"}) @StdMove TensorVector tensors,
-    @Cast("caffe2::serialize::PyTorchStreamWriter*") @ByRef Pointer out);
+// #include 
+// #include 
+// #include 
+// #include 
+// #include 
+// #include 
 
-// Surrounding system can install an additional hook to produce extra files
-// with metadata based on environment every time a module is serialized.
-@Namespace("torch::jit") public static native void SetExportModuleExtraFilesHook(@ByVal @Cast("torch::jit::ExportModuleExtraFilesHook*") Pointer hook);
+// #include 
 
-/**
- * Generates new bytecode for a Script module and returns what the op list
- * would be for a LiteScriptModule based off the current code base. If you
- * have a LiteScriptModule and want to get the currently present
- * list of ops call _export_operator_list instead.
- */
-@Namespace("torch::jit") public static native @ByVal StringVector export_opnames(@Const @ByRef JitModule m);
-// Targeting ../BytecodeEmitMode.java
+// #include 
+// #include 
+// #include 
+// #include 
+// #include 
+// #include 
+// Targeting ../Module.java
 
 
-// Targeting ../BytecodeEmitModeGuard.java
+@Namespace("torch::nn") public static Pointer shiftLeft(Pointer stream, Module module) { return _shiftLeft(stream, module.asModule()); }
+private static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer _shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef Module module);
 
 
+/** Serializes a {@code Module} pointer into an {@code OutputArchive}. */
+@Namespace("torch::nn") public static OutputArchive shiftLeft(OutputArchive archive, Module module) { return _shiftLeft(archive, module.asModule()); }
+private static native @ByRef @Name("operator <<") OutputArchive _shiftLeft(
+    @ByRef OutputArchive archive,
+    @Const @SharedPtr("torch::nn::Module") @ByRef Module module);
 
-@Namespace("torch::jit") public static native @ByVal IValue to_tuple(@ByVal IValueVector ivalues);
-@Namespace("torch::jit") public static native @ByVal IValue Table(@StdVector EnumNameValue entries);
+/** Deserializes a {@code Module} from an {@code InputArchive}. */
+@Namespace("torch::nn") public static InputArchive shiftRight(InputArchive archive, Module module) { return _shiftRight(archive, module.asModule()); }
+private static native @ByRef @Name("operator >>") InputArchive _shiftRight(
+    @ByRef InputArchive archive,
+    @Const @SharedPtr("torch::nn::Module") @ByRef Module module);
 
-// TODO remove these switches once interface call is rolled out.
-@Namespace("torch::jit") public static native void enableMobileInterfaceCallExport();
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nn::Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 
 
 
-@Namespace("torch::jit") public static native void save_jit_module(
-    @Const @ByRef JitModule module,
-    @StdString BytePointer filename,
-    @Const @ByRef(nullValue = "torch::jit::ExtraFilesMap()") ExtraFilesMap extra_files);
-@Namespace("torch::jit") public static native void save_jit_module(
-    @Const @ByRef JitModule module,
-    @StdString BytePointer filename);
-@Namespace("torch::jit") public static native void save_jit_module(
-    @Const @ByRef JitModule module,
-    @StdString String filename,
-    @Const @ByRef(nullValue = "torch::jit::ExtraFilesMap()") ExtraFilesMap extra_files);
-@Namespace("torch::jit") public static native void save_jit_module(
-    @Const @ByRef JitModule module,
-    @StdString String filename);
 
-@Namespace("torch::jit") public static native @ByVal @Cast("torch::jit::DetachedBuffer::UniqueDetachedBuffer*") Pointer save_jit_module_to_bytes(
-    @Const @ByRef JitModule module,
-    @Const @ByRef(nullValue = "torch::jit::ExtraFilesMap()") ExtraFilesMap extra_files);
-@Namespace("torch::jit") public static native @ByVal @Cast("torch::jit::DetachedBuffer::UniqueDetachedBuffer*") Pointer save_jit_module_to_bytes(
-    @Const @ByRef JitModule module);
 
-@Namespace("torch::jit") public static native void save_jit_module_to_write_func(
-    @Const @ByRef JitModule module,
-    @Const @ByRef ExtraFilesMap extra_files,
-    @Cast("bool") boolean save_mobile_debug_info,
-    @Const @ByRef WriteFunction writer_func);
 
- // namespace jit
- // namespace torch
 
 
-// Parsed from torch/arg.h
 
-// #pragma once
 
-// #include 
 
-// #define TORCH_ARG(T, name)
-//  public:
-//   inline auto name(const T& new_##name)->decltype(*this) { /* NOLINT */
-//     this->name##_ = new_##name;
-//     return *this;
-//   }
-//   inline auto name(T&& new_##name)->decltype(*this) { /* NOLINT */
-//     this->name##_ = std::move(new_##name);
-//     return *this;
-//   }
-//   inline const T& name() const noexcept { /* NOLINT */
-//     return this->name##_;
-//   }
-//   inline T& name() noexcept { /* NOLINT */
-//     return this->name##_;
-//   }
-// 
-//  private:
-//   T name##_ /* NOLINT */
 
 
-// Parsed from torch/enum.h
 
-// #pragma once
 
-// #include 
 
-// #include 
-// #include 
-// #include 
-// #include 
 
-// #define TORCH_ENUM_DECLARE(name)
-//   namespace torch {
-//   namespace enumtype {
-//   /*                                                                  \
-//    NOTE: We need to provide the default constructor for each struct, \
-//    otherwise Clang 3.8 would complain:                               \
-//    ```                                                               \
-//    error: default initialization of an object of const type 'const   \
-//    enumtype::Enum1' without a user-provided default constructor      \
-//    ```                                                               \
-//  */
-//   struct k##name {
-//     k##name() {}
-//   };
-//   }
-//   TORCH_API extern const enumtype::k##name k##name;
-//   }
 
-// #define TORCH_ENUM_DEFINE(name)
-//   namespace torch {
-//   const enumtype::k##name k##name;
-//   }
+ // namespace nn
+ // namespace torch
 
-// #define TORCH_ENUM_PRETTY_PRINT(name)
-//   std::string operator()(const enumtype::k##name& v) const {
-//     std::string k("k");
-//     return k + #name;
-//   }
 
-// NOTE: Backstory on why we need the following two macros:
-//
-// Consider the following options class:
-//
-// ```
-// struct TORCH_API SomeOptions {
-//   typedef c10::variant
-//   reduction_t; SomeOptions(reduction_t reduction = torch::kMean) :
-//   reduction_(reduction) {}
-//
-//   TORCH_ARG(reduction_t, reduction);
-// };
-// ```
-//
-// and the functional that uses it:
-//
-// ```
-// Tensor some_functional(
-//     const Tensor& input,
-//     SomeOptions options = {}) {
-//   ...
-// }
-// ```
-//
-// Normally, we would expect this to work:
-//
-// `F::some_functional(input, torch::kNone)`
-//
-// However, it throws the following error instead:
-//
-// ```
-// error: could not convert `torch::kNone` from `const torch::enumtype::kNone`
-// to `torch::nn::SomeOptions`
-// ```
-//
-// To get around this problem, we explicitly provide the following constructors
-// for `SomeOptions`:
-//
-// ```
-// SomeOptions(torch::enumtype::kNone reduction) : reduction_(torch::kNone) {}
-// SomeOptions(torch::enumtype::kMean reduction) : reduction_(torch::kMean) {}
-// SomeOptions(torch::enumtype::kSum reduction) : reduction_(torch::kSum) {}
-// ```
-//
-// so that the conversion from `torch::kNone` to `SomeOptions` would work.
+// Parsed from ATen/Config.h
+
+// #pragma once
+
+// Test these using #if AT_MKL_ENABLED(), not #ifdef, so that it's
+// obvious if you forgot to include Config.h
+//    c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined
 //
-// Note that we also provide the default constructor `SomeOptions() {}`, so that
-// `SomeOptions options = {}` can work.
-// #define TORCH_OPTIONS_CTOR_VARIANT_ARG3(
-//     OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3)
-//   OPTIONS_NAME() = default;
-//   OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {}
-//   OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {}
-//   OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {}
+// DO NOT put the macros for CUDA libraries in this file; they belong in cuda/CUDAConfig.h
 
-// #define TORCH_OPTIONS_CTOR_VARIANT_ARG4(
-//     OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3, TYPE4)
-//   OPTIONS_NAME() = default;
-//   OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {}
-//   OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {}
-//   OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {}
-//   OPTIONS_NAME(torch::enumtype::TYPE4 ARG_NAME) : ARG_NAME##_(torch::TYPE4) {}
-// Targeting ../kLinear.java
+// #define AT_MKLDNN_ENABLED() 1
+// #define AT_MKL_ENABLED() 0
+// #define AT_MKL_SEQUENTIAL() 0
+// #define AT_FFTW_ENABLED() 1
+// #define AT_POCKETFFT_ENABLED() 1
+// #define AT_NNPACK_ENABLED() 1
+// #define CAFFE2_STATIC_LINK_CUDA() 0
+// #define AT_BUILD_WITH_BLAS() 1
+// #define AT_BUILD_WITH_LAPACK() 1
+public static final int AT_PARALLEL_OPENMP = 1;
+public static final int AT_PARALLEL_NATIVE = 0;
+public static final int AT_PARALLEL_NATIVE_TBB = 0;
+// #define AT_BLAS_F2C() 0
+// #define AT_BLAS_USE_CBLAS_DOT() 1
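+// Of the build-configuration macros above, only the AT_PARALLEL_* selectors
+// survive as Java constants; the function-style macros remain C++-only. A
+// trivial check of the intra-op backend chosen at build time:
+//
+//   if (AT_PARALLEL_OPENMP == 1) {
+//     System.out.println("intra-op parallelism is backed by OpenMP");
+//   }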
 
-  
-// Targeting ../kConv1D.java
 
-  
-// Targeting ../kConv2D.java
+// Parsed from ATen/Parallel-inl.h
 
-  
-// Targeting ../kConv3D.java
+// #pragma once
 
-  
-// Targeting ../kConvTranspose1D.java
+// #include 
+// #include 
 
-  
-// Targeting ../kConvTranspose2D.java
+ // namespace at
 
-  
-// Targeting ../kConvTranspose3D.java
 
-  
-// Targeting ../kSigmoid.java
+// Parsed from ATen/Parallel.h
 
-  
-// Targeting ../kTanh.java
+// #pragma once
+// #include 
+// #include 
+// #include 
+// #include 
 
-  
-// Targeting ../kReLU.java
+@Namespace("at") public static native @Cast("int64_t") long divup(@Cast("int64_t") long x, @Cast("int64_t") long y);
 
-  
-// Targeting ../kGELU.java
+// Called during new thread initialization
+@Namespace("at") public static native void init_num_threads();
 
-  
-// Targeting ../kSiLU.java
+// Sets the number of threads to be used in parallel region
+@Namespace("at") public static native void set_num_threads(int arg0);
 
-  
-// Targeting ../kMish.java
+// Returns the maximum number of threads that may be used in a parallel region
+@Namespace("at") public static native int get_num_threads();
 
-  
-// Targeting ../kLeakyReLU.java
+// Returns the current thread number (starting from 0)
+// in the current parallel region, or 0 in the sequential region
+@Namespace("at") public static native int get_thread_num();
 
-  
-// Targeting ../kFanIn.java
+// Checks whether the code runs in a parallel region
+@Namespace("at") public static native @Cast("bool") boolean in_parallel_region();
 
-  
-// Targeting ../kFanOut.java
+// Initialise num_threads lazily at first parallel call
+@Namespace("at::internal") public static native void lazy_init_num_threads();
 
-  
-// Targeting ../kConstant.java
+@Namespace("at::internal") public static native void set_thread_num(int arg0);
+// Targeting ../ThreadIdGuard.java
 
-  
-// Targeting ../kReflect.java
 
-  
-// Targeting ../kReplicate.java
 
-  
-// Targeting ../kCircular.java
+ // namespace internal
 
-  
-// Targeting ../kNearest.java
+/*
+parallel_for
 
-  
-// Targeting ../kBilinear.java
+begin: index at which to start applying user function
 
-  
-// Targeting ../kBicubic.java
+end: index at which to stop applying user function
 
-  
-// Targeting ../kTrilinear.java
+grain_size: number of elements per chunk; impacts the degree of parallelization
 
-  
-// Targeting ../kArea.java
+f: user function applied in parallel to the chunks, signature:
+  void f(int64_t begin, int64_t end)
 
-  
-// Targeting ../kNearestExact.java
+Warning: parallel_for does NOT copy thread local
+states from the current thread to the worker threads.
+This means for example that Tensor operations CANNOT be used in the
+body of your function, only data pointers.
+*/
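+// Sketch of the chunking contract described above: with begin=0, end=10 and
+// grain_size=4, the user function f is applied to the half-open ranges
+// [0,4), [4,8) and [8,10), possibly on different worker threads. Illustrative
+// Java only (in/out are assumed primitive arrays); the C++ template itself is
+// not mapped in these bindings:
+//
+//   static void f(long begin, long end) {       // applied once per chunk
+//     for (long i = begin; i < end; i++) out[(int) i] = 2 * in[(int) i];
+//   }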
 
-  
-// Targeting ../kSum.java
+/*
+parallel_reduce
 
-  
-// Targeting ../kMean.java
+begin: index at which to start applying reduction
 
-  
-// Targeting ../kMax.java
+end: index at which to stop applying reduction
 
-  
-// Targeting ../kNone.java
+grain_size: number of elements per chunk; impacts the number of elements in
+the intermediate results tensor and the degree of parallelization.
 
-  
-// Targeting ../kBatchMean.java
+ident: identity for the binary combination function sf; sf(ident, x) needs to
+return x.
 
-  
-// Targeting ../kZeros.java
+f: function for reduction over a chunk. f needs to be of signature scalar_t
+f(int64_t partial_begin, int64_t partial_end, scalar_t identity)
 
-  
-// Targeting ../kBorder.java
+sf: function to combine two partial results. sf needs to be of signature
+scalar_t sf(scalar_t x, scalar_t y)
 
-  
-// Targeting ../kReflection.java
+For example, you might have a tensor of 10000 entries and want to sum together
+all the elements. parallel_reduce with a grain_size of 2500 will then allocate
+an intermediate result tensor with 4 elements. Then it will execute the function
+"f" you provide and pass the beginning and end index of these chunks, so
+0-2499, 2500-4999, etc. and the combination identity. It will then write out
+the result from each of these chunks into the intermediate result tensor. After
+that it'll reduce the partial results from each chunk into a single number using
+the combination function sf and the identity ident. For a total summation this
+would be "+" and 0 respectively. This is similar to TBB's approach [1], where
+you need to provide a function to accumulate a subrange, a function to combine
+two partial results and an identity.
 
-  
-// Targeting ../kRNN_TANH.java
+Warning: parallel_reduce does NOT copy thread local
+states from the current thread to the worker threads.
+This means for example that Tensor operations CANNOT be used in the
+body of your function, only data pointers.
 
-  
-// Targeting ../kRNN_RELU.java
+[1] https://software.intel.com/en-us/node/506154
+*/
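+// A sequential Java sketch of the contract just described (10000 elements,
+// grain_size 2500 -> 4 partial results; f reduces one chunk, sf combines two
+// partials, ident is 0 for summation). Illustrative only, over an assumed
+// long[] data array; the C++ template itself is not mapped in these bindings:
+//
+//   long begin = 0, end = 10000, grain = 2500, ident = 0;
+//   long[] partials = new long[(int) ((end - begin + grain - 1) / grain)];
+//   for (int c = 0; c < partials.length; c++) {
+//     long lo = begin + c * grain, hi = Math.min(lo + grain, end);
+//     long acc = ident;
+//     for (long i = lo; i < hi; i++) acc += data[(int) i];  // f over one chunk
+//     partials[c] = acc;
+//   }
+//   long result = ident;
+//   for (long p : partials) result += p;                    // sf combines partials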
 
-  
-// Targeting ../kLSTM.java
+// Returns a detailed string describing parallelization settings
+@Namespace("at") public static native @StdString BytePointer get_parallel_info();
 
-  
-// Targeting ../kGRU.java
+// Sets number of threads used for inter-op parallelism
+@Namespace("at") public static native void set_num_interop_threads(int arg0);
 
-  
-// Targeting ../kValid.java
+// Returns the number of threads used for inter-op parallelism
+@Namespace("at") public static native int get_num_interop_threads();
 
-  
-// Targeting ../kSame.java
+// Launches inter-op parallel task
 
-  
-// Targeting ../_compute_enum_name.java
+ // namespace internal
 
+// Launches intra-op parallel task
+@Namespace("at") public static native void intraop_launch(@ByVal Func func);
 
+// Returns number of intra-op threads used by default
+@Namespace("at") public static native int intraop_default_num_threads();
 
- // namespace enumtype
- // namespace torch
+ // namespace at
+
+// #if AT_PARALLEL_OPENMP
+// #include <ATen/ParallelOpenMP.h> // IWYU pragma: keep
+// #elif AT_PARALLEL_NATIVE
+// #include <ATen/ParallelNative.h> // IWYU pragma: keep
+// #elif AT_PARALLEL_NATIVE_TBB
+// #include <ATen/ParallelNativeTBB.h> // IWYU pragma: keep
+// #endif
 
+// #include <ATen/Parallel-inl.h> // IWYU pragma: keep
 
-// Parsed from torch/types.h
+
+// Parsed from torch/csrc/api/include/torch/types.h
 
 // #pragma once
 
@@ -76107,6 +64877,385 @@ The list of (type, depth) pairs controls the type of specializations and the num
  // namespace torch
 
 
+// Parsed from torch/csrc/profiler/orchestration/observer.h
+
+// #pragma once
+
+// #include 
+// #include 
+
+// #include 
+
+// ----------------------------------------------------------------------------
+// -- Profiler Config ---------------------------------------------------------
+// ----------------------------------------------------------------------------
+@Namespace("torch::profiler::impl") public enum ActivityType {
+  CPU(0),
+  CUDA(1), // CUDA kernels, runtime
+  NUM_KINETO_ACTIVITIES(2);// must be the last one
+
+    public final int value;
+    private ActivityType(int v) { this.value = v; }
+    private ActivityType(ActivityType e) { this.value = e.value; }
+    public ActivityType intern() { for (ActivityType e : values()) if (e.value == value) return e; return this; }
+    @Override public String toString() { return intern().name(); }
+}
+
+@Namespace("torch::profiler::impl") public enum ProfilerState {
+  Disabled(0),
+  CPU(1), // CPU-only profiling
+  CUDA(2), // CPU + CUDA events
+  NVTX(3), // only emit NVTX markers
+  ITT(4), // only emit ITT markers
+  KINETO(5), // use libkineto
+  KINETO_GPU_FALLBACK(6), // use CUDA events when CUPTI is not available
+  KINETO_ONDEMAND(7), // run the profiler in on-demand mode
+  NUM_PROFILER_STATES(8);// must be the last one
+
+    public final int value;
+    private ProfilerState(int v) { this.value = v; }
+    private ProfilerState(ProfilerState e) { this.value = e.value; }
+    public ProfilerState intern() { for (ProfilerState e : values()) if (e.value == value) return e; return this; }
+    @Override public String toString() { return intern().name(); }
+}
+
+@Namespace("torch::profiler::impl") public enum ActiveProfilerType {
+  NONE(0),
+  LEGACY(1),
+  KINETO(2),
+  NVTX(3),
+  ITT(4);
+
+    public final int value;
+    private ActiveProfilerType(int v) { this.value = v; }
+    private ActiveProfilerType(ActiveProfilerType e) { this.value = e.value; }
+    public ActiveProfilerType intern() { for (ActiveProfilerType e : values()) if (e.value == value) return e; return this; }
+    @Override public String toString() { return intern().name(); }
+}
+// Targeting ../ExperimentalConfig.java
+
+
+// Targeting ../ProfilerConfig.java
+
+
+
+// ----------------------------------------------------------------------------
+// -- Profiler base class -----------------------------------------------------
+// ----------------------------------------------------------------------------
+
+// Note: The following are only for the active *thread local* profiler.
+@Namespace("torch::profiler::impl") public static native @Cast("bool") boolean profilerEnabled();
+@Namespace("torch::profiler::impl") public static native ActiveProfilerType profilerType();
+@Namespace("torch::profiler::impl") public static native @ByVal ProfilerConfig getProfilerConfig();
+
+ // namespace impl
+ // namespace profiler
+ // namespace torch
+
+
+// Parsed from torch/csrc/profiler/api.h
+
+// #pragma once
+
+// #include 
+
+// There are some components which use these symbols. Until we migrate them
+// we have to mirror them in the old autograd namespace.
+ // namespace profiler
+ // namespace autograd
+ // namespace torch
+
+
+// Parsed from torch/csrc/profiler/events.h
+
+// #pragma once
+
+// #include 
+// #include 
+// #include 
+
+/* A vector type to hold a list of performance counters */
+
+/* Standard list of performance events independent of hardware or backend */
+@Namespace("torch::profiler") @MemberGetter public static native @Const @ByRef PointerPointer ProfilerPerfEvents();
+ // namespace profiler
+ // namespace torch
+
+
+// Parsed from torch/csrc/profiler/stubs/base.h
+
+// #pragma once
+
+// #include 
+// #include 
+
+// #include 
+// #include 
+// Targeting ../CUevent_st.java
+
+
+
+// ----------------------------------------------------------------------------
+// -- Annotation --------------------------------------------------------------
+// ----------------------------------------------------------------------------
+
+ // namespace impl
+ // namespace profiler
+ // namespace torch
+
+
+// Parsed from torch/csrc/profiler/util.h
+
+// #pragma once
+
+// #include 
+// #include 
+// #include 
+// #include 
+// #include 
+// #include 
+
+// #include 
+// #include 
+// #include 
+// #include 
+// #include 
+// #include 
+
+// #ifndef _WIN32
+// #include 
+// #endif
+// #if defined(C10_IOS) && defined(C10_MOBILE)
+// #include  // for gettimeofday()
+// #endif
+
+// #if defined(__i386__) || defined(__x86_64__) || defined(__amd64__)
+// #define C10_RDTSC
+// #if defined(_MSC_VER)
+// #elif defined(__CUDACC__) || defined(__HIPCC__)
+// #elif defined(__clang__)
+// `__rdtsc` is available by default.
+// NB: This has to be first, because Clang will also define `__GNUC__`
+// #elif defined(__GNUC__)
+// #include 
+// #else
+// #undef C10_RDTSC
+// #endif
+// #endif
+
+// TODO: replace with pytorch/rfcs#43 when it is ready.
+// #define SOFT_ASSERT(cond, ...)
+//   [&]() -> bool {
+//     if (C10_UNLIKELY(!(cond))) {
+//       torch::profiler::impl::logSoftAssert(
+//           __func__,
+//           __FILE__,
+//           static_cast(__LINE__),
+//           #cond,
+//           ::c10::str(__VA_ARGS__));
+//       if (torch::profiler::impl::softAssertRaises()) {
+//         TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__);
+//       } else {
+//         TORCH_WARN(__VA_ARGS__);
+//       }
+//       return false;
+//     }
+//     return true;
+//   }()
+@Namespace("torch::profiler::impl") public static native @Cast("bool") boolean softAssertRaises();
+@Namespace("torch::profiler::impl") public static native void setSoftAssertRaises(@ByVal BoolOptional value);
+@Namespace("torch::profiler::impl") public static native void logSoftAssert(
+    @Cast("const char*") BytePointer func,
+    @Cast("const char*") BytePointer file,
+    @Cast("uint32_t") int line,
+    @Cast("const char*") BytePointer cond,
+    @Cast("const char*") BytePointer args);
+@Namespace("torch::profiler::impl") public static native void logSoftAssert(
+    String func,
+    String file,
+    @Cast("uint32_t") int line,
+    String cond,
+    String args);
+@Namespace("torch::profiler::impl") public static native void logSoftAssert(
+    @Cast("const char*") BytePointer func,
+    @Cast("const char*") BytePointer file,
+    @Cast("uint32_t") int line,
+    @Cast("const char*") BytePointer cond,
+    @ByVal CompileTimeEmptyString args);
+@Namespace("torch::profiler::impl") public static native void logSoftAssert(
+    String func,
+    String file,
+    @Cast("uint32_t") int line,
+    String cond,
+    @ByVal CompileTimeEmptyString args);
+
+@Namespace("torch::profiler::impl") public static native @Cast("torch::profiler::impl::time_t") long getTimeSinceEpoch();
+
+@Namespace("torch::profiler::impl") public static native @Cast("torch::profiler::impl::time_t") long getTime(@Cast("bool") boolean allow_monotonic/*=false*/);
+@Namespace("torch::profiler::impl") public static native @Cast("torch::profiler::impl::time_t") long getTime();
+
+// We often do not need to capture true wall times. If a fast mechanism such
+// as TSC is available we can use that instead and convert back to epoch time
+// during post processing. This greatly reduces the clock's contribution to
+// profiling.
+//   http://btorpey.github.io/blog/2014/02/18/clock-sources-in-linux/
+//   https://quick-bench.com/q/r8opkkGZSJMu9wM_XTbDouq-0Io
+// TODO: We should use
+// `https://github.com/google/benchmark/blob/main/src/cycleclock.h`
+
+// Convert `getCount` results to nanoseconds since the Unix epoch.
+
+
+// Targeting ../FileLineFunc.java
+
+
+
+@Namespace("torch::profiler::impl") public static native @StdVector FileLineFunc prepareCallstack(
+    @Const @ByRef StackEntryVector cs);
+@Namespace("torch::profiler::impl") public static native @ByVal StringVector callstackStr(
+    @StdVector FileLineFunc cs);
+@Namespace("torch::profiler::impl") public static native @StdString BytePointer stacksToStr(
+    @Const @ByRef StringVector stacks,
+    @Cast("const char*") BytePointer delim);
+@Namespace("torch::profiler::impl") public static native @StdString String stacksToStr(
+    @Const @ByRef StringVector stacks,
+    String delim);
+@Namespace("torch::profiler::impl") public static native @Cast("std::vector*") @StdVector LongVector inputSizes(
+    @Const @ByRef RecordFunction fn,
+    @Cast("const bool") boolean flatten_list_enabled/*=false*/);
+@Namespace("torch::profiler::impl") public static native @Cast("std::vector*") @StdVector LongVector inputSizes(
+    @Const @ByRef RecordFunction fn);
+@Namespace("torch::profiler::impl") public static native @StdString BytePointer shapesToStr(
+    @Cast("std::vector*") @StdVector LongVector shapes);
+@Namespace("torch::profiler::impl") public static native @StdString BytePointer dtypesToStr(@Const @ByRef StringVector types);
+@Namespace("torch::profiler::impl") public static native @StdString BytePointer inputOpIdsToStr(
+    @Const @ByRef RecordFunctionHandleIntList input_op_ids);
+@Namespace("torch::profiler::impl") public static native @ByVal StringVector inputTypes(@Const @ByRef RecordFunction fn);
+
+@Namespace("torch::profiler::impl") public static native @ByVal StringIValueMap saveExtraArgs(@Const @ByRef RecordFunction fn);
+
+@Namespace("torch::profiler::impl") public static native @Cast("uint64_t") long computeFlops(
+    @StdString BytePointer op_name,
+    @Const @ByRef StringIValueMap extra_args);
+@Namespace("torch::profiler::impl") public static native @Cast("uint64_t") long computeFlops(
+    @StdString String op_name,
+    @Const @ByRef StringIValueMap extra_args);
+
+ // namespace impl
+ // namespace profiler
+ // namespace torch
+ // namespace profiler
+ // namespace autograd
+ // namespace torch
+
+
+// Parsed from torch/csrc/autograd/profiler_kineto.h
+
+// #pragma once
+
+// #include 
+// #include 
+
+// #include 
+// #include 
+// #include 
+// #include 
+// Targeting ../Result.java
+
+
+// Targeting ../ActivityTraceWrapper.java
+
+
+ // namespace kineto
+ // namespace impl
+ // namespace profiler
+
+// Consolidating events returned directly from Kineto
+// with events manually created by us (e.g. start/stop marks,
+// memory allocation events)
+
+/*
+ * This API is used by backends to record latency of events that
+ * happened in the backend but were not visible to pytorch runtime.
+ * For example, if part of the model is lowered to a dsp backend, then
+ * the execution of that part of the model is delegated to the backend.
+ * When backend finishes execution it has an option to provide profiling
+ * information (latency only at th emoment) corresponding to different operators
+ * that were executed in the backend.
+ * When such events are recorded by backend using this API, the event
+ * records will be collected by active kineto profiler. If no kineto profiler
+ * is active then the event is ignored.
+ * This provides us with a way to generate all the profiling information
+ * for a model regardless of where model (or part of it) executed.
+ * @param start_time_us: start time in us of the event
+ * @param end_time_us: end time in us of the event
+ * @param debug_handle: debug handle to correlate this event/op with
+ * model level module/source information
+ * @param scope: scope of the event, e.g. LITE_INTERPRETER, RECORD_FN etc.
+ * @param event_name: name of the event, e.g. op name
+ * @param backend_name: name of the backend where the event took place.
+ */
+@Namespace("torch::autograd::profiler") public static native void reportBackendEventToActiveKinetoProfiler(
+    @Cast("const int64_t") long start_time_us,
+    @Cast("const int64_t") long end_time_us,
+    @Cast("const int64_t") long debug_handle,
+    RecordScope scope,
+    @StdString BytePointer event_name,
+    @StdString BytePointer backend_name);
+@Namespace("torch::autograd::profiler") public static native void reportBackendEventToActiveKinetoProfiler(
+    @Cast("const int64_t") long start_time_us,
+    @Cast("const int64_t") long end_time_us,
+    @Cast("const int64_t") long debug_handle,
+    @Cast("at::RecordScope") byte scope,
+    @StdString String event_name,
+    @StdString String backend_name);
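+// An illustrative call, with made-up timestamps and debug handle, assuming
+// RecordScope is mapped as a Java enum whose value field backs the byte
+// overload; the scope and names follow the parameter documentation above:
+//
+//   long t0 = System.currentTimeMillis() * 1000;   // start, in us (illustrative)
+//   // ... backend executes the delegated part of the model ...
+//   long t1 = System.currentTimeMillis() * 1000;   // end, in us
+//   reportBackendEventToActiveKinetoProfiler(
+//       t0, t1, /*debug_handle=*/42,
+//       RecordScope.LITE_INTERPRETER.value, "my_op", "my_dsp_backend");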
+
+
+
+/*
+ * Same as enableProfiler but with a callback to do post-processing of
+ * KinetoEvents.
+ * enableProfilerWithEventPostProcess enables the profiler to capture the
+ * specified activities, with the specified RecordFunction scope, if any.
+ * Additionally, it takes a functor that does in-place post-processing of
+ * events, e.g. populating stack trace or module hierarchy information lazily
+ * using debug_handle.
+ * Example usage is with the lite interpreter, which has a recording scope of
+ * LITE_INTERPRETER. In this case the lite interpreter runtime records debug
+ * handles in RecordFunction, along with other information. Debug handles are
+ * eventually passed down to KinetoEvent and recorded as part of the event.
+ * KinetoEdgeCPUProfiler, in torch/csrc/jit/mobile/profiler_edge.cpp, enables
+ * profiler using post-processing callback, via
+ * enableProfilerWithEventPostProcess, that takes these debug handles and
+ * generates stack trace and module hierarchy information, once profiling is
+ * done.
+ */
+
+
+@Namespace("torch::autograd::profiler") public static native void prepareProfiler(
+    @Const @ByRef ProfilerConfig config,
+    @Const @ByRef ActivityTypeSet activities);
+
+ // namespace profiler
+ // namespace autograd
+
+// Experimental.
+@Namespace("torch::profiler::impl") public static native void _reportVulkanEventToProfiler(@ByVal @Cast("torch::profiler::impl::vulkan_id_t*") Pointer id);
+
+ // namespace impl
+ // namespace profiler
+
+ // namespace torch
+
+
+// Parsed from torch/csrc/autograd/profiler.h
+
+// #pragma once
+
+// #include 
+// #include 
+
+
 // Parsed from torch/utils.h
 
 // #pragma once
@@ -76202,3247 +65351,4972 @@ The list of (type, depth) pairs controls the type of specializations and the num
  // namespace torch
 
 
-// Parsed from torch/data.h
-
-// #pragma once
-
-// #include 
-// #include 
-// #include 
-// #include 
-
-// Some "exports".
- // namespace data
- // namespace torch
-
-
-// Parsed from torch/data/example.h
-
-// #pragma once
-
-// #include 
-// Targeting ../Example.java
-
-
-// Targeting ../TensorExample.java
-
-
-// Targeting ../NoTarget.java
-
-
- // namespace example
-
-/** A specialization for {@code Example} that does not have a target.
- * 
- *  This class exists so that code can be written for a templated {@code Example}
- *  type, and work both for labeled and unlabeled datasets. */
- // namespace data
- // namespace torch
-
-
-// Parsed from torch/data/iterator.h
+// Parsed from torch/nn/cloneable.h
 
 // #pragma once
 
-// #include 
+// #include 
 // #include 
+// #include 
 
+// #include 
 // #include 
 
-// #include 
-// #include 
 // #include 
-// #include 
 // #include 
-// For increased safety and more separated logic, this implementation of
-// `Iterator` consists of a `ValidIterator` and a `SentinelIterator`. A
-// `ValidIterator` yields new batches until the `DataLoader` is exhausted. While
-// the `DataLoader` is not exhausted, `ValidIterator`s compare equal if they are
-// the same object. When the `ValidIterator` becomes exhausted, it compares
-// equal to the `SentinelIterator`, but not before. Half the code here is to
-// implement double dispatch for the comparison. Got damnit, C++.
-
-/** Base class for the {@code ValidIterator} and {@code SentinelIterator} */
+// Targeting ../ModuleDictImplCloneable.java
 
-// Targeting ../ExampleIterator.java
 
+// Targeting ../ModuleListImplCloneable.java
 
-// Targeting ../ExampleVectorIterator.java
 
+// Targeting ../SequentialImplCloneable.java
 
-// Targeting ../ExampleVectorOptionalIterator.java
 
+// Targeting ../ParameterDictImplCloneable.java
 
- // namespace data
- // namespace torch
 
+// Targeting ../ParameterListImplCloneable.java
 
-// Parsed from torch/data/worker_exception.h
 
-// #pragma once
+// Targeting ../AdaptiveLogSoftmaxWithLossImplCloneable.java
 
-// #include 
-// #include 
-// #include 
-// Targeting ../WorkerException.java
 
+// Targeting ../BatchNorm1dImplCloneable.java
 
 
- // namespace data
- // namespace torch
+// Targeting ../InstanceNorm1dImplCloneable.java
 
 
-// Parsed from torch/data/dataloader.h
+// Targeting ../Conv1dImplCloneable.java
 
-// #pragma once
 
-// #include 
-// #include 
+// Targeting ../ConvTranspose1dImplCloneable.java
 
-// #include 
-// #include 
 
-// #include 
+// Targeting ../DropoutImplCloneable.java
 
-// #include 
-// #include 
-// #include 
-// #include 
 
-/** Creates a {@code DataLoader} instance for a stateless {@code dataset}, a {@code sampler} and
- *  some {@code options}. */
+// Targeting ../BatchNorm2dImplCloneable.java
 
-/** Creates a {@code DataLoader} instance for a stateless {@code dataset} and some
- *  {@code options}. A sampler (by default a {@code RandomSampler}) will be constructed from
- *  the size of the dataset. */
 
-/** Creates a {@code DataLoader} for a stateful {@code dataset} and some {@code options}. */
- // namespace data
- // namespace torch
+// Targeting ../InstanceNorm2dImplCloneable.java
 
 
-// Parsed from torch/data/dataloader/base.h
+// Targeting ../Conv2dImplCloneable.java
 
-// #pragma once
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
+// Targeting ../ConvTranspose2dImplCloneable.java
 
-// #include 
-// #include 
 
-// #include 
-// #include 
+// Targeting ../Dropout2dImplCloneable.java
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// Targeting ../ChunkRandomDataLoaderBase.java
 
+// Targeting ../BatchNorm3dImplCloneable.java
 
-// Targeting ../MNISTRandomDataLoaderBase.java
 
+// Targeting ../InstanceNorm3dImplCloneable.java
 
- // namespace data
- // namespace torch
 
+// Targeting ../Conv3dImplCloneable.java
 
-// Parsed from torch/data/dataloader_options.h
 
-// #pragma once
+// Targeting ../ConvTranspose3dImplCloneable.java
 
-// #include 
-// #include 
 
-// #include 
-// #include 
-// Targeting ../DataLoaderOptions.java
+// Targeting ../Dropout3dImplCloneable.java
 
 
-// Targeting ../FullDataLoaderOptions.java
+// Targeting ../AlphaDropoutImplCloneable.java
 
 
- // namespace data
- // namespace torch
+// Targeting ../FeatureAlphaDropoutImplCloneable.java
 
 
-// Parsed from torch/data/dataloader/stateful.h
+// Targeting ../CosineSimilarityImplCloneable.java
 
-// #pragma once
 
-// #include 
-// #include 
+// Targeting ../PairwiseDistanceImplCloneable.java
 
-// #include 
-// #include 
-// #include 
-// Targeting ../ChunkRandomDataLoader.java
 
+// Targeting ../EmbeddingImplCloneable.java
 
- // namespace data
- // namespace torch
 
+// Targeting ../EmbeddingBagImplCloneable.java
 
-// Parsed from torch/data/dataloader/stateless.h
 
-// #pragma once
+// Targeting ../FoldImplCloneable.java
 
-// #include 
-// #include 
 
-// #include 
+// Targeting ../UnfoldImplCloneable.java
 
-// #include 
-// #include 
 
-// #include 
-// #include 
-// #include 
-// Targeting ../MNISTRandomDataLoader.java
+// Targeting ../IdentityImplCloneable.java
 
 
- // namespace data
- // namespace torch
+// Targeting ../LinearImplCloneable.java
 
 
-// Parsed from torch/data/datasets.h
+// Targeting ../BilinearImplCloneable.java
 
-// #pragma once
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
+// Targeting ../FlattenImplCloneable.java
 
 
-// Parsed from torch/data/datasets/base.h
+// Targeting ../UnflattenImplCloneable.java
 
-// #pragma once
 
-// #include 
-// #include 
+// Targeting ../L1LossImplCloneable.java
 
-// #include 
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include  // NOLINT
- // namespace datasets
- // namespace data
- // namespace torch
+// Targeting ../KLDivLossImplCloneable.java
 
-// Targeting ../ChunkBatchDataset.java
 
+// Targeting ../MSELossImplCloneable.java
 
-// Targeting ../ChunkBatchSharedBatchDataset.java
 
+// Targeting ../BCELossImplCloneable.java
 
-// Targeting ../ChunkMapBatchDataset.java
 
+// Targeting ../HingeEmbeddingLossImplCloneable.java
 
-// Targeting ../MNISTBatchDataset.java
 
+// Targeting ../MultiMarginLossImplCloneable.java
 
-// Targeting ../MNISTMapBatchDataset.java
 
+// Targeting ../CosineEmbeddingLossImplCloneable.java
 
-// Targeting ../TensorExampleBatchDataset.java
 
+// Targeting ../SmoothL1LossImplCloneable.java
 
-// Targeting ../MNISTDataset.java
 
+// Targeting ../HuberLossImplCloneable.java
 
-// Targeting ../TensorExampleDataset.java
 
+// Targeting ../MultiLabelMarginLossImplCloneable.java
 
 
-/** A {@code StreamDataset} represents a dataset that is a potentially infinite
- *  stream. It takes as batch index only a number, which is the batch size, and
- *  yields that many elements from the stream. */
- // namespace datasets
- // namespace data
- // namespace torch
+// Targeting ../SoftMarginLossImplCloneable.java
 
 
-// Parsed from torch/data/datasets/chunk.h
+// Targeting ../MultiLabelSoftMarginLossImplCloneable.java
 
-// #pragma once
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
+// Targeting ../TripletMarginLossImplCloneable.java
 
-// #include 
-// Targeting ../ChunkDataReader.java
 
+// Targeting ../TripletMarginWithDistanceLossImplCloneable.java
 
-/** BatchDataBuffer manages a queue of UnwrappedBatchData. After a new chunk is
- *  loaded, BatchDataBuffer splits it into small batches and push them into the
- *  queue. When get_batch is called from data loader, it pops cached batches and
- *  return. If the cache is empty, it either waits to load more chunks or return
- *  null if all chunks are loaded. */
 
-// Targeting ../ChunkDatasetOptions.java
+// Targeting ../CTCLossImplCloneable.java
 
 
-// Targeting ../ChunkDataset.java
+// Targeting ../PoissonNLLLossImplCloneable.java
 
 
- // namespace datasets
- // namespace data
- // namespace torch
+// Targeting ../MarginRankingLossImplCloneable.java
 
 
-// Parsed from torch/data/datasets/map.h
+// Targeting ../NLLLossImplCloneable.java
 
-// #pragma once
 
-// #include 
-// #include 
+// Targeting ../CrossEntropyLossImplCloneable.java
 
-// #include 
 
-// #include 
-// #include 
-// #include 
+// Targeting ../BCEWithLogitsLossImplCloneable.java
 
-// Targeting ../ChunkMapDataset.java
 
+// Targeting ../ReflectionPad1dImplCloneable.java
 
-// Targeting ../MNISTMapDataset.java
 
+// Targeting ../ReplicationPad1dImplCloneable.java
 
 
-/** Creates a {@code MapDataset} with the given dataset and transform. */
+// Targeting ../ConstantPad1dImplCloneable.java
 
- // namespace datasets
- // namespace data
- // namespace torch
 
+// Targeting ../AvgPool1dImplCloneable.java
 
-// Parsed from torch/data/datasets/mnist.h
 
-// #pragma once
+// Targeting ../MaxPool1dImplCloneable.java
 
-// #include 
-// #include 
-// #include 
 
-// #include 
+// Targeting ../AdaptiveAvgPool1dImplCloneable.java
 
-// #include 
-// #include 
-// Targeting ../MNIST.java
 
+// Targeting ../AdaptiveMaxPool1dImplCloneable.java
 
- // namespace datasets
- // namespace data
- // namespace torch
 
+// Targeting ../MaxUnpool1dImplCloneable.java
 
-// Parsed from torch/data/datasets/shared.h
 
-// #pragma once
+// Targeting ../LPPool1dImplCloneable.java
 
-// #include 
 
-// #include 
-// #include 
-// Targeting ../ChunkSharedBatchDataset.java
+// Targeting ../ReflectionPad2dImplCloneable.java
 
 
+// Targeting ../ReplicationPad2dImplCloneable.java
 
-/** Constructs a new {@code SharedBatchDataset} by creating a
- *  {@code shared_ptr}. All arguments are forwarded to
- *  {@code make_shared}. */
- // namespace datasets
- // namespace data
- // namespace torch
 
+// Targeting ../ConstantPad2dImplCloneable.java
 
-// Parsed from torch/data/datasets/stateful.h
 
-// #pragma once
+// Targeting ../ZeroPad2dImplCloneable.java
 
-// #include 
-// #include 
 
-// #include 
-// #include 
- // namespace serialize
- // namespace torch
-// Targeting ../ChunkStatefulDataset.java
+// Targeting ../AvgPool2dImplCloneable.java
 
 
+// Targeting ../MaxPool2dImplCloneable.java
 
-/** Serializes a statefulDataset to {@code OutputArchive}. */
 
-/** Deserializes a statefulDataset from an {@code InputArchive}. */
+// Targeting ../AdaptiveAvgPool2dImplCloneable.java
 
- // namespace datasets
- // namespace data
- // namespace torch
 
+// Targeting ../AdaptiveMaxPool2dImplCloneable.java
 
-// Parsed from torch/data/datasets/tensor.h
 
-// #pragma once
+// Targeting ../MaxUnpool2dImplCloneable.java
 
-// #include 
-// #include 
-// #include 
 
-// #include 
-// #include 
-// Targeting ../TensorDataset.java
+// Targeting ../FractionalMaxPool2dImplCloneable.java
 
 
+// Targeting ../LPPool2dImplCloneable.java
 
- // namespace datasets
- // namespace data
- // namespace torch
 
+// Targeting ../ReflectionPad3dImplCloneable.java
 
-// Parsed from torch/data/samplers.h
 
-// #pragma once
+// Targeting ../ReplicationPad3dImplCloneable.java
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
 
+// Targeting ../ConstantPad3dImplCloneable.java
 
-// Parsed from torch/data/samplers/base.h
 
-// #pragma once
+// Targeting ../AvgPool3dImplCloneable.java
 
-// #include 
-// #include 
 
-// #include 
-// #include 
-// #include 
- // namespace serialize
- // namespace torch
-// Targeting ../Sampler.java
+// Targeting ../MaxPool3dImplCloneable.java
 
 
-// Targeting ../BatchSizeSampler.java
+// Targeting ../AdaptiveAvgPool3dImplCloneable.java
 
 
+// Targeting ../AdaptiveMaxPool3dImplCloneable.java
 
- // namespace samplers
- // namespace data
- // namespace torch
 
+// Targeting ../MaxUnpool3dImplCloneable.java
 
-// Parsed from torch/data/samplers/custom_batch_request.h
 
-// #pragma once
+// Targeting ../FractionalMaxPool3dImplCloneable.java
 
-// #include 
-// #include 
-// Targeting ../CustomBatchRequest.java
 
+// Targeting ../RNNImplCloneable.java
 
- // namespace samplers
- // namespace data
- // namespace torch
 
+// Targeting ../LSTMImplCloneable.java
 
-// Parsed from torch/data/samplers/distributed.h
 
-// #pragma once
+// Targeting ../GRUImplCloneable.java
 
-// #include 
-// #include 
 
-// #include 
-// #include 
- // namespace serialize
- // namespace torch
-// Targeting ../DistributedSampler.java
+// Targeting ../RNNCellImplCloneable.java
 
 
-// Targeting ../DistributedRandomSampler.java
+// Targeting ../LSTMCellImplCloneable.java
 
 
-// Targeting ../DistributedSequentialSampler.java
+// Targeting ../GRUCellImplCloneable.java
 
 
+// Targeting ../PixelShuffleImplCloneable.java
 
- // namespace samplers
- // namespace data
- // namespace torch
 
+// Targeting ../PixelUnshuffleImplCloneable.java
 
-// Parsed from torch/data/samplers/random.h
 
-// #pragma once
+// Targeting ../UpsampleImplCloneable.java
 
-// #include 
-// #include 
-// #include 
 
-// #include 
-// #include 
- // namespace serialize
- // namespace torch
-// Targeting ../RandomSampler.java
+// Targeting ../ELUImplCloneable.java
 
 
- // namespace samplers
- // namespace data
- // namespace torch
+// Targeting ../SELUImplCloneable.java
 
 
-// Parsed from torch/data/samplers/sequential.h
+// Targeting ../HardshrinkImplCloneable.java
 
-// #pragma once
 
-// #include 
-// #include 
-// #include 
+// Targeting ../HardtanhImplCloneable.java
 
-// #include 
-// #include 
- // namespace serialize
- // namespace torch
-// Targeting ../SequentialSampler.java
 
+// Targeting ../LeakyReLUImplCloneable.java
 
 
- // namespace samplers
- // namespace data
- // namespace torch
+// Targeting ../LogSigmoidImplCloneable.java
 
 
-// Parsed from torch/data/samplers/serialize.h
+// Targeting ../SoftmaxImplCloneable.java
 
-// #pragma once
 
-// #include 
-// #include 
-/** Serializes a {@code Sampler} into an {@code OutputArchive}. */
+// Targeting ../SoftminImplCloneable.java
 
-/** Deserializes a {@code Sampler} from an {@code InputArchive}. */
- // namespace samplers
- // namespace data
- // namespace torch
 
+// Targeting ../LogSoftmaxImplCloneable.java
 
-// Parsed from torch/data/samplers/stream.h
 
-// #pragma once
+// Targeting ../Softmax2dImplCloneable.java
 
-// #include 
-// #include 
-// #include 
-// #include 
 
-// #include 
- // namespace serialize
- // namespace torch
-// Targeting ../BatchSize.java
+// Targeting ../PReLUImplCloneable.java
 
 
-// Targeting ../StreamSampler.java
+// Targeting ../ReLUImplCloneable.java
 
 
+// Targeting ../ReLU6ImplCloneable.java
 
- // namespace samplers
- // namespace data
- // namespace torch
 
+// Targeting ../RReLUImplCloneable.java
 
-// Parsed from torch/data/transforms.h
 
-// #pragma once
+// Targeting ../CELUImplCloneable.java
 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
 
+// Targeting ../GLUImplCloneable.java
 
-// Parsed from torch/data/transforms/base.h
 
-// #pragma once
+// Targeting ../GELUImplCloneable.java
 
-// #include 
 
-// #include 
-// #include 
-// Targeting ../ExampleCollation.java
+// Targeting ../SiLUImplCloneable.java
 
 
+// Targeting ../MishImplCloneable.java
 
-/** A transformation of individual input examples to individual output examples.
- * 
- *  Just like a {@code Dataset} is a {@code BatchDataset}, a {@code Transform} is a
- *  {@code BatchTransform} that can operate on the level of individual examples rather
- *  than entire batches. The batch-level transform is implemented (by default)
- *  in terms of the example-level transform, though this can be customized. */
- // namespace transforms
- // namespace data
- // namespace torch
 
+// Targeting ../SigmoidImplCloneable.java
 
-// Parsed from torch/data/transforms/collate.h
 
-// #pragma once
+// Targeting ../SoftplusImplCloneable.java
 
-// #include 
-// #include 
 
-// #include 
+// Targeting ../SoftshrinkImplCloneable.java
 
-/** A {@code Collation} is a transform that reduces a batch into a single value.
- *  The result is a {@code BatchDataset} that has the type of the single value as its
- *  {@code BatchType}. */
 
-///
-///
+// Targeting ../SoftsignImplCloneable.java
 
-/** A {@code Collate} allows passing a custom function to reduce/collate a batch
- *  into a single value. It's effectively the lambda version of {@code Collation},
- *  which you could subclass and override {@code operator()} to achieve the same.
- * 
- *  \rst
- *  .. code-block:: cpp
- *    using namespace torch::data;
- * 
- *    auto dataset = datasets::MNIST("path/to/mnist")
- *      .map(transforms::Collate>([](std::vector> e) {
- *        return std::move(e.front());
- *      }));
- *  \endrst */
- // namespace transforms
- // namespace data
- // namespace torch
 
+// Targeting ../TanhImplCloneable.java
 
-// Parsed from torch/data/transforms/lambda.h
 
-// #pragma once
+// Targeting ../TanhshrinkImplCloneable.java
 
-// #include 
 
-// #include 
-// #include 
-// #include 
+// Targeting ../ThresholdImplCloneable.java
 
-/** A {@code BatchTransform} that applies a user-provided functor to a batch. */
 
-// A `Transform` that applies a user-provided functor to individual examples.
+// Targeting ../MultiheadAttentionImplCloneable.java
 
- // namespace transforms
- // namespace data
- // namespace torch
 
+// Targeting ../LayerNormImplCloneable.java
 
-// Parsed from torch/data/transforms/stack.h
 
-// #pragma once
+// Targeting ../LocalResponseNormImplCloneable.java
 
-// #include 
-// #include 
-// #include 
 
-// #include 
-// #include 
-// Targeting ../ExampleStack.java
+// Targeting ../CrossMapLRN2dImplCloneable.java
 
 
+// Targeting ../GroupNormImplCloneable.java
 
-/** A {@code Collation} for {@code Example} types that stacks all data
- *  tensors into one tensor. */
- // namespace transforms
- // namespace data
- // namespace torch
 
+// Targeting ../TransformerEncoderLayerImplCloneable.java
 
-// Parsed from torch/data/transforms/tensor.h
 
-// #pragma once
+// Targeting ../TransformerDecoderLayerImplCloneable.java
 
-// #include 
-// #include 
-// #include 
 
-// #include 
-// #include 
+// Targeting ../TransformerEncoderImplCloneable.java
 
-/** A {@code Transform} that is specialized for the typical {@code Example}
- *  combination. It exposes a single {@code operator()} interface hook (for
- *  subclasses), and calls this function on input {@code Example} objects. */
 
-/** A {@code Lambda} specialized for the typical {@code Example} input type. */
+// Targeting ../TransformerDecoderImplCloneable.java
 
-/** Normalizes input tensors by subtracting the supplied mean and dividing by
- *  the given standard deviation. */
- // namespace transforms
- // namespace data
- // namespace torch
 
+// Targeting ../TransformerImplCloneable.java
 
-// Parsed from torch/serialize.h
 
-// #pragma once
 
-// #include 
-// #include 
-// #include 
-// #include 
+ // namespace nn
+ // namespace torch
 
-// #include 
 
-/** Serializes the given {@code value}.
- *  There must be an overload of {@code operator<<} between {@code serialize::OutputArchive}
- *  and {@code Value} for this method to be well-formed. Currently, such an overload
- *  is provided for (subclasses of):
- * 
- *  - {@code torch::nn::Module},
- *  - {@code torch::optim::Optimizer}
- *  - {@code torch::Tensor}
- * 
- *  To perform the serialization, a {@code serialize::OutputArchive} is constructed,
- *  and all arguments after the {@code value} are forwarded to its {@code save_to} method.
- *  For example, you can pass a filename, or an {@code ostream}.
- * 
- *  \rst
- *  .. code-block:: cpp
- * 
- *    torch::nn::Linear model(3, 4);
- *    torch::save(model, "model.pt");
- * 
- *    torch::optim::SGD sgd(/*lr=* /0.9);
- *    std::ostringstream stream;
- *    // Note that the same stream cannot be used in multiple torch::save(...)
- *    // invocations, otherwise the header will be corrupted.
- *    torch::save(sgd, stream);
- * 
- *    auto tensor = torch::ones({3, 4});
- *    torch::save(tensor, "my_tensor.pt");
- *  \endrst */
+// Parsed from torch/nn/options/batchnorm.h
 
-/** Serializes the given {@code tensor_vec} of type {@code std::vector}.
- * 
- *  To perform the serialization, a {@code serialize::OutputArchive} is constructed,
- *  and all arguments after the {@code tensor_vec} are forwarded to its {@code save_to}
- *  method. For example, you can pass a filename, or an {@code ostream}.
- * 
- *  \rst
- *  .. code-block:: cpp
- * 
- *    std::vector tensor_vec = { torch::randn({1, 2}),
- *    torch::randn({3, 4}) }; torch::save(tensor_vec, "my_tensor_vec.pt");
- * 
- *    std::vector tensor_vec = { torch::randn({5, 6}),
- *    torch::randn({7, 8}) }; std::ostringstream stream;
- *    // Note that the same stream cannot be used in multiple torch::save(...)
- *    // invocations, otherwise the header will be corrupted.
- *    torch::save(tensor_vec, stream);
- *  \endrst */
+// #pragma once
 
-/** Deserializes the given {@code value}.
- *  There must be an overload of {@code operator>>} between {@code serialize::InputArchive}
- *  and {@code Value} for this method to be well-formed. Currently, such an overload
- *  is provided for (subclasses of):
- * 
- *  - {@code torch::nn::Module},
- *  - {@code torch::optim::Optimizer}
- *  - {@code torch::Tensor}
- * 
- *  To perform the serialization, a {@code serialize::InputArchive} is constructed,
- *  and all arguments after the {@code value} are forwarded to its {@code load_from} method.
- *  For example, you can pass a filename, or an {@code istream}.
- * 
- *  \rst
- *  .. code-block:: cpp
- * 
- *    torch::nn::Linear model(3, 4);
- *    torch::load(model, "model.pt");
- * 
- *    torch::optim::SGD sgd(/*lr=* /0.9);
- *    std::istringstream stream("...");
- *    torch::load(sgd, stream);
- * 
- *    auto tensor = torch::ones({3, 4});
- *    torch::load(tensor, "my_tensor.pt");
- *  \endrst */
+// #include 
+// #include 
+// #include 
+// Targeting ../BatchNormOptions.java
 
-/** Deserializes the given {@code tensor_vec} of type {@code std::vector}.
- * 
- *  To perform the serialization, a {@code serialize::InputArchive} is constructed,
- *  and all arguments after the {@code value} are forwarded to its {@code load_from} method.
- *  For example, you can pass a filename, or an {@code istream}.
+
+
+/** Options for the {@code BatchNorm1d} module.
  * 
- *  \rst
- *  .. code-block:: cpp
+ *  Example:
+ *  
{@code
+ *  BatchNorm1d
+ *  model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+ *  }
+ */
+
+///
+
+/** Options for the {@code BatchNorm2d} module.
  * 
- *    std::vector tensor_vec;
- *    torch::load(tensor_vec, "my_tensor_vec.pt");
+ *  Example:
+ *  
{@code
+ *  BatchNorm2d
+ *  model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+ *  }
+ */
+
+///
+
+/** Options for the {@code BatchNorm3d} module.
  * 
- *    std::vector tensor_vec;
- *    std::istringstream stream("...");
- *    torch::load(tensor_vec, stream);
- *  \endrst */
- // namespace torch
+ *  Example:
+ *  
{@code
+ *  BatchNorm3d
+ *  model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+ *  }
+ */
+// ============================================================================
+// Targeting ../BatchNormFuncOptions.java


-// Parsed from torch/serialize/archive.h

-// #pragma once

-// #include 
-// #include 
+ // namespace functional
+
+ // namespace nn
+ // namespace torch


-// Parsed from torch/serialize/input-archive.h
+// Parsed from torch/nn/functional/batchnorm.h

// #pragma once

-// #include 
-// #include 
-// #include 
-// #include 
+// #include 
+// #include 

// #include 

-// #include 
-// #include 
-// #include 
-// #include 
- // namespace at
- // namespace jit
- // namespace torch
-// Targeting ../InputArchive.java

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor batch_norm(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor running_mean,
+    @Const @ByRef Tensor running_var,
+    @ByVal Tensor weight,
+    @ByVal Tensor bias,
+    @Cast("bool") boolean training,
+    @ByVal DoubleOptional momentum,
+    double eps);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.batch_norm
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::BatchNormFuncOptions}
+/** class to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::batch_norm(input, mean, variance,
+/** F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false));
+/** }
+ */
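[Editor's note] The {@code batch_norm} overloads declared just below are exposed as static methods of org.bytedeco.pytorch.global.torch. A minimal Java sketch of a call through this binding, assuming statically imported globals and caller-supplied tensors (the class and parameter names here are illustrative, not part of the preset):

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.batch_norm;

    public class BatchNormSketch {
        // Uses the three-argument overload declared below, which applies
        // the default BatchNormFuncOptions.
        static Tensor normalize(Tensor input, Tensor runningMean, Tensor runningVar) {
            return batch_norm(input, runningMean, runningVar);
        }
    }
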
+@Namespace("torch::nn::functional") public static native @ByVal Tensor batch_norm(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor running_mean,
+    @Const @ByRef Tensor running_var,
+    @Const @ByRef(nullValue = "torch::nn::functional::BatchNormFuncOptions{}") BatchNormFuncOptions options);
+@Namespace("torch::nn::functional") public static native @ByVal Tensor batch_norm(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor running_mean,
+    @Const @ByRef Tensor running_var);

- // namespace serialize
+ // namespace functional
+ // namespace nn
 // namespace torch


-// Parsed from torch/serialize/output-archive.h
+// Parsed from torch/nn/options/conv.h

// #pragma once

+// #include 
// #include 
-// #include 
+// #include 
+// #include 
+// #include 
+// Targeting ../DetailConv1dOptions.java

-// #include 
-// #include 
-// #include 
-// #include 
- // namespace at
- // namespace jit
-// Targeting ../OutputArchive.java
+// Targeting ../DetailConv2dOptions.java

- // namespace serialize
- // namespace torch
+// Targeting ../DetailConv3dOptions.java


-// Parsed from torch/serialize/tensor.h

-// #pragma once

-// #include 
-// #include 
-@Namespace("torch") public static native @ByRef @Name("operator <<") OutputArchive shiftLeft(
-    @ByRef OutputArchive archive,
-    @Const @ByRef Tensor tensor);
+// Targeting ../Conv1dOptions.java

-@Namespace("torch") public static native @ByRef @Name("operator >>") InputArchive shiftRight(
-    @ByRef InputArchive archive,
-    @ByRef Tensor tensor);
- // namespace torch
+// Targeting ../Conv2dOptions.java


-// Parsed from torch/nn.h

-// #pragma once
+// Targeting ../Conv3dOptions.java

-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 
-// #include 


-// Parsed from torch/nn/cloneable.h

+/** {@code ConvOptions} specialized for the {@code Conv1d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false));
+ *  }
+ */

-// #pragma once
+///

-// #include 
-// #include 
-// #include 
+/** {@code ConvOptions} specialized for the {@code Conv2d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false));
+ *  }
+ */

-// #include 
-// #include 
+///

-// #include 
-// #include 
-// Targeting ../ModuleDictImplCloneable.java
+/** {@code ConvOptions} specialized for the {@code Conv3d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false));
+ *  }
+ */
+// ============================================================================
+// Targeting ../Conv1dFuncOptions.java

-// Targeting ../ModuleListImplCloneable.java
+// Targeting ../Conv2dFuncOptions.java

-// Targeting ../SequentialImplCloneable.java
+// Targeting ../Conv3dFuncOptions.java

-// Targeting ../ParameterDictImplCloneable.java

-// Targeting ../ParameterListImplCloneable.java

+/** {@code ConvFuncOptions} specialized for {@code torch::nn::functional::conv1d}.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1));
+ *  }
+ */
+///

-// Targeting ../AdaptiveLogSoftmaxWithLossImplCloneable.java
+/** {@code ConvFuncOptions} specialized for {@code torch::nn::functional::conv2d}.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1));
+ *  }
+ */
+///

-// Targeting ../BatchNorm1dImplCloneable.java
+/** {@code ConvFuncOptions} specialized for {@code torch::nn::functional::conv3d}.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1));
+ *  }
+ */

-// Targeting ../InstanceNorm1dImplCloneable.java
+// Targeting ../ConvTranspose1dOptions.java

-// Targeting ../Conv1dImplCloneable.java
+// Targeting ../ConvTranspose2dOptions.java

-// Targeting ../ConvTranspose1dImplCloneable.java
+// Targeting ../ConvTranspose3dOptions.java

-// Targeting ../DropoutImplCloneable.java

+/** {@code ConvTransposeOptions} specialized for the {@code ConvTranspose1d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  ConvTranspose1d model(ConvTranspose1dOptions(3, 2,
+ *  3).stride(1).bias(false));
+ *  }
+ */
+
+///

-// Targeting ../BatchNorm2dImplCloneable.java
+/** {@code ConvTransposeOptions} specialized for the {@code ConvTranspose2d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  ConvTranspose2d model(ConvTranspose2dOptions(3, 2,
+ *  3).stride(1).bias(false));
+ *  }
+ */
+///

-// Targeting ../InstanceNorm2dImplCloneable.java
+/** {@code ConvTransposeOptions} specialized for the {@code ConvTranspose3d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  ConvTranspose3d model(ConvTranspose3dOptions(2, 2,
+ *  2).stride(1).bias(false));
+ *  }
+ */
+// ============================================================================
+// Targeting ../ConvTranspose1dFuncOptions.java

-// Targeting ../Conv2dImplCloneable.java
+// Targeting ../ConvTranspose2dFuncOptions.java

-// Targeting ../ConvTranspose2dImplCloneable.java
+// Targeting ../ConvTranspose3dFuncOptions.java

-// Targeting ../Dropout2dImplCloneable.java

-// Targeting ../BatchNorm3dImplCloneable.java

+/** {@code ConvTransposeFuncOptions} specialized for
+ *  {@code torch::nn::functional::conv_transpose1d}.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1));
+ *  }
+ */
+///

-// Targeting ../InstanceNorm3dImplCloneable.java
+/** {@code ConvTransposeFuncOptions} specialized for
+ *  {@code torch::nn::functional::conv_transpose2d}.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1));
+ *  }
+ */
+///

-// Targeting ../Conv3dImplCloneable.java
+/** {@code ConvTransposeFuncOptions} specialized for
+ *  {@code torch::nn::functional::conv_transpose3d}.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1));
+ *  }
+ */
+ // namespace functional

-// Targeting ../ConvTranspose3dImplCloneable.java
+ // namespace nn
+ // namespace torch

-// Targeting ../Dropout3dImplCloneable.java


+// Parsed from torch/nn/functional/conv.h

+// #pragma once

-// Targeting ../AlphaDropoutImplCloneable.java
+// #include 
+// #include 

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS

-// Targeting ../FeatureAlphaDropoutImplCloneable.java
+@Namespace("torch::nn::functional::detail") public static native @StdString BytePointer padding_unwrap(@ByVal kValid arg0);
+@Namespace("torch::nn::functional::detail") public static native @StdString BytePointer padding_unwrap(@ByVal kSame arg0);

-// Targeting ../CosineSimilarityImplCloneable.java
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv1d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor bias,
+    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride,
+    @Const @ByRef Conv1dPadding padding,
+    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer dilation,
+    @Cast("int64_t") long groups);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv1d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::Conv1dFuncOptions} class
+/** to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1));
+/** }
+ */
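[Editor's note] A hedged Java sketch of the conv1d binding declared just below, assuming Conv1dFuncOptions is default-constructible in Java (its C++ default argument Conv1dFuncOptions{} suggests so); all other names are placeholders:

    import org.bytedeco.pytorch.Conv1dFuncOptions;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.conv1d;

    public class Conv1dSketch {
        // Java counterpart of the C++ example F::conv1d(x, weight, options);
        // here the options are left at their defaults.
        static Tensor convolve(Tensor x, Tensor weight) {
            return conv1d(x, weight, new Conv1dFuncOptions());
        }
    }
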
+@Namespace("torch::nn::functional") public static native @ByVal Tensor conv1d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef(nullValue = "torch::nn::functional::Conv1dFuncOptions{}") Conv1dFuncOptions options);

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv2d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor bias,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride,
+    @Const @ByRef Conv2dPadding padding,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer dilation,
+    @Cast("int64_t") long groups);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

-// Targeting ../PairwiseDistanceImplCloneable.java
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv2d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::Conv2dFuncOptions} class
+/** to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor conv2d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef(nullValue = "torch::nn::functional::Conv2dFuncOptions{}") Conv2dFuncOptions options);

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv3d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor bias,
+    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer stride,
+    @Const @ByRef Conv3dPadding padding,
+    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer dilation,
+    @Cast("int64_t") long groups);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

-// Targeting ../EmbeddingImplCloneable.java
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv3d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::Conv3dFuncOptions} class
+/** to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor conv3d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef(nullValue = "torch::nn::functional::Conv3dFuncOptions{}") Conv3dFuncOptions options);

+// ============================================================================

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose1d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor bias,
+    @ByVal LongArrayRef stride,
+    @ByVal LongArrayRef padding,
+    @ByVal LongArrayRef output_padding,
+    @Cast("int64_t") long groups,
+    @ByVal LongArrayRef dilation);
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose1d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor bias,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding,
+    @Cast("int64_t") long groups,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

-// Targeting ../EmbeddingBagImplCloneable.java
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose1d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for
+/** {@code torch::nn::functional::ConvTranspose1dFuncOptions} class to learn what
+/** optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor conv_transpose1d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef(nullValue = "torch::nn::functional::ConvTranspose1dFuncOptions{}") ConvTranspose1dFuncOptions options);

-// Targeting ../FoldImplCloneable.java

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose2d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor bias,
+    @ByVal LongArrayRef stride,
+    @ByVal LongArrayRef padding,
+    @ByVal LongArrayRef output_padding,
+    @Cast("int64_t") long groups,
+    @ByVal LongArrayRef dilation);
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose2d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor bias,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding,
+    @Cast("int64_t") long groups,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose2d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for
+/** {@code torch::nn::functional::ConvTranspose2dFuncOptions} class to learn what
+/** optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor conv_transpose2d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef(nullValue = "torch::nn::functional::ConvTranspose2dFuncOptions{}") ConvTranspose2dFuncOptions options);

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose3d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor bias,
+    @ByVal LongArrayRef stride,
+    @ByVal LongArrayRef padding,
+    @ByVal LongArrayRef output_padding,
+    @Cast("int64_t") long groups,
+    @ByVal LongArrayRef dilation);
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose3d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor bias,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding,
+    @Cast("int64_t") long groups,
+    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

-// Targeting ../UnfoldImplCloneable.java
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose3d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for
+/** {@code torch::nn::functional::ConvTranspose3dFuncOptions} class to learn what
+/** optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1));
+/** }
+ */
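[Editor's note] The transposed-convolution functionals follow the same calling pattern; a sketch against the conv_transpose1d overload declared earlier in this section, with the same default-constructibility assumption for the options class:

    import org.bytedeco.pytorch.ConvTranspose1dFuncOptions;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.conv_transpose1d;

    public class ConvTransposeSketch {
        // Mirrors F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions()).
        static Tensor upsample(Tensor x, Tensor weight) {
            return conv_transpose1d(x, weight, new ConvTranspose1dFuncOptions());
        }
    }
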
+@Namespace("torch::nn::functional") public static native @ByVal Tensor conv_transpose3d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef(nullValue = "torch::nn::functional::ConvTranspose3dFuncOptions{}") ConvTranspose3dFuncOptions options);
+ // namespace functional
+ // namespace nn
+ // namespace torch


+// Parsed from torch/nn/options/distance.h

// #pragma once

+// #include 
+// #include 
+// #include 
+// Targeting ../CosineSimilarityOptions.java

-// Targeting ../IdentityImplCloneable.java

-// Targeting ../LinearImplCloneable.java

+/** Options for {@code torch::nn::functional::cosine_similarity}.
+ * 
+ *  See the documentation for {@code torch::nn::CosineSimilarityOptions} class to
+ *  learn what arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::cosine_similarity(input1, input2,
+ *  F::CosineSimilarityFuncOptions().dim(1));
+ *  }
+ */
+
+// Targeting ../PairwiseDistanceOptions.java

-// Targeting ../BilinearImplCloneable.java

+/** Options for {@code torch::nn::functional::pairwise_distance}.
+ * 
+ *  See the documentation for {@code torch::nn::PairwiseDistanceOptions} class to
+ *  learn what arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1));
+ *  }
+ */
+ // namespace functional

-// Targeting ../FlattenImplCloneable.java
+ // namespace nn
+ // namespace torch

-// Targeting ../UnflattenImplCloneable.java


+// Parsed from torch/nn/functional/distance.h

-// Targeting ../L1LossImplCloneable.java
+// #pragma once

+// #include 

-// Targeting ../KLDivLossImplCloneable.java
+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

-// Targeting ../MSELossImplCloneable.java
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_similarity
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for
+/** {@code torch::nn::functional::CosineSimilarityFuncOptions} class to learn what
+/** optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::cosine_similarity(input1, input2,
+/** F::CosineSimilarityFuncOptions().dim(1));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor cosine_similarity(
+    @Const @ByRef Tensor x1,
+    @Const @ByRef Tensor x2,
+    @Cast("const torch::nn::functional::CosineSimilarityFuncOptions*") @ByRef(nullValue = "torch::nn::functional::CosineSimilarityFuncOptions{}") CosineSimilarityOptions options);

+// ============================================================================

-// Targeting ../BCELossImplCloneable.java

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pairwise_distance
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for
+/** {@code torch::nn::functional::PairwiseDistanceFuncOptions} class to learn what
+/** optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1));
+/** }
+ */
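[Editor's note] The distance functionals reuse the module-level options classes under a @Cast to the functional options type. A sketch of both bindings (cosine_similarity is declared above, pairwise_distance just below), assuming the options classes have a mapped no-argument constructor:

    import org.bytedeco.pytorch.CosineSimilarityOptions;
    import org.bytedeco.pytorch.PairwiseDistanceOptions;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.*;

    public class DistanceSketch {
        // Both calls leave the options at their C++ defaults.
        static Tensor cosine(Tensor x1, Tensor x2) {
            return cosine_similarity(x1, x2, new CosineSimilarityOptions());
        }
        static Tensor distance(Tensor x1, Tensor x2) {
            return pairwise_distance(x1, x2, new PairwiseDistanceOptions());
        }
    }
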
+@Namespace("torch::nn::functional") public static native @ByVal Tensor pairwise_distance(
+    @Const @ByRef Tensor x1,
+    @Const @ByRef Tensor x2,
+    @Cast("const torch::nn::functional::PairwiseDistanceFuncOptions*") @ByRef(nullValue = "torch::nn::functional::PairwiseDistanceFuncOptions{}") PairwiseDistanceOptions options);

-// Targeting ../HingeEmbeddingLossImplCloneable.java
+// ============================================================================

+/** Computes the p-norm distance between every pair of row vectors in the input.
+ *  This function will be faster if the rows are contiguous. */

-// Targeting ../MultiMarginLossImplCloneable.java
+ // namespace functional
+ // namespace nn
+ // namespace torch

-// Targeting ../CosineEmbeddingLossImplCloneable.java

+// Parsed from torch/nn/options/dropout.h

-// Targeting ../SmoothL1LossImplCloneable.java
+// #pragma once

+// #include 
+// #include 
+// #include 
+// Targeting ../DropoutOptions.java

-// Targeting ../HuberLossImplCloneable.java

+/** Options for the {@code Dropout2d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  Dropout2d model(Dropout2dOptions().p(0.42).inplace(true));
+ *  }
+ */

-// Targeting ../MultiLabelMarginLossImplCloneable.java
+///

+/** Options for the {@code Dropout3d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  Dropout3d model(Dropout3dOptions().p(0.42).inplace(true));
+ *  }
+ */

-// Targeting ../SoftMarginLossImplCloneable.java
+///

+/** Options for the {@code AlphaDropout} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  AlphaDropout model(AlphaDropoutOptions(0.2).inplace(true));
+ *  }
+ */

-// Targeting ../MultiLabelSoftMarginLossImplCloneable.java
+///

+/** Options for the {@code FeatureAlphaDropout} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  FeatureAlphaDropout model(FeatureAlphaDropoutOptions(0.2).inplace(true));
+ *  }
+ */
+// Targeting ../DropoutFuncOptions.java

-// Targeting ../TripletMarginLossImplCloneable.java

-// Targeting ../TripletMarginWithDistanceLossImplCloneable.java

+/** Options for {@code torch::nn::functional::dropout2d}.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5));
+ *  }
+ */
+///

-// Targeting ../CTCLossImplCloneable.java
+/** Options for {@code torch::nn::functional::dropout3d}.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5));
+ *  }
+ */
+///
+// Targeting ../AlphaDropoutFuncOptions.java

-// Targeting ../PoissonNLLLossImplCloneable.java
+// Targeting ../FeatureAlphaDropoutFuncOptions.java

-// Targeting ../MarginRankingLossImplCloneable.java

-// Targeting ../NLLLossImplCloneable.java
+ // namespace functional
+ // namespace nn
+ // namespace torch

-// Targeting ../CrossEntropyLossImplCloneable.java

+// Parsed from torch/nn/functional/dropout.h

-// Targeting ../BCEWithLogitsLossImplCloneable.java
+// #pragma once

+// #include 

-// Targeting ../ReflectionPad1dImplCloneable.java
+// #include 

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS

-// Targeting ../ReplicationPad1dImplCloneable.java
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor dropout(@ByVal Tensor input, double p, @Cast("bool") boolean training, @Cast("bool") boolean inplace);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

-// Targeting ../ConstantPad1dImplCloneable.java

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::DropoutFuncOptions} class
+/** to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::dropout(input, F::DropoutFuncOptions().p(0.5));
+/** }
+ */
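[Editor's note] A minimal Java sketch of the one-argument dropout overload declared just below, which supplies the default DropoutFuncOptions:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.dropout;

    public class DropoutSketch {
        // Equivalent to F::dropout(input, F::DropoutFuncOptions()) in C++.
        static Tensor regularize(Tensor input) {
            return dropout(input);
        }
    }
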
+@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout(@ByVal Tensor input, @Const @ByRef(nullValue = "torch::nn::functional::DropoutFuncOptions{}") DropoutFuncOptions options);
+@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout(@ByVal Tensor input);

+// ============================================================================

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor dropout2d(@ByVal Tensor input, double p, @Cast("bool") boolean training, @Cast("bool") boolean inplace);

+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout2d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::Dropout2dFuncOptions}
+/** class to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout2d(
+    @ByVal Tensor input,
+    @Cast("const torch::nn::functional::Dropout2dFuncOptions*") @ByRef(nullValue = "torch::nn::functional::Dropout2dFuncOptions{}") DropoutFuncOptions options);
+@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout2d(
+    @ByVal Tensor input);

+// ============================================================================

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor dropout3d(@ByVal Tensor input, double p, @Cast("bool") boolean training, @Cast("bool") boolean inplace);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout3d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::Dropout3dFuncOptions}
+/** class to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout3d(
+    @ByVal Tensor input,
+    @Cast("const torch::nn::functional::Dropout3dFuncOptions*") @ByRef(nullValue = "torch::nn::functional::Dropout3dFuncOptions{}") DropoutFuncOptions options);
+@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout3d(
+    @ByVal Tensor input);

+// ============================================================================

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor alpha_dropout(
+    @ByVal Tensor input,
+    double p,
+    @Cast("bool") boolean training,
+    @Cast("bool") boolean inplace);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.alpha_dropout
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::AlphaDropoutFuncOptions}
+/** class to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::alpha_dropout(input,
+/** F::AlphaDropoutFuncOptions().p(0.5).training(false));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor alpha_dropout(
+    @ByVal Tensor input,
+    @Const @ByRef(nullValue = "torch::nn::functional::AlphaDropoutFuncOptions{}") AlphaDropoutFuncOptions options);
+@Namespace("torch::nn::functional") public static native @ByVal Tensor alpha_dropout(
+    @ByVal Tensor input);

+// ============================================================================

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor feature_alpha_dropout(
+    @ByVal Tensor input,
+    double p,
+    @Cast("bool") boolean training,
+    @Cast("bool") boolean inplace);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.feature_alpha_dropout
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for
+/** {@code torch::nn::functional::FeatureAlphaDropoutFuncOptions} class to learn what
+/** optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::feature_alpha_dropout(input,
+/** F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor feature_alpha_dropout(
+    @ByVal Tensor input,
+    @Const @ByRef(nullValue = "torch::nn::functional::FeatureAlphaDropoutFuncOptions{}") FeatureAlphaDropoutFuncOptions options);
+@Namespace("torch::nn::functional") public static native @ByVal Tensor feature_alpha_dropout(
+    @ByVal Tensor input);
+ // namespace functional
+ // namespace nn
+ // namespace torch


+// Parsed from torch/nn/options/embedding.h

// #pragma once

+// #include 
+// #include 
+// #include 
+// #include 
+// Targeting ../EmbeddingOptions.java

+// Targeting ../EmbeddingFromPretrainedOptions.java


+// ============================================================================
+// Targeting ../EmbeddingFuncOptions.java

+ // namespace functional

+// ============================================================================

+///
+// Targeting ../EmbeddingBagOptions.java

+// Targeting ../EmbeddingBagFromPretrainedOptions.java

+// ============================================================================
+// Targeting ../EmbeddingBagFuncOptions.java

+ // namespace functional
+ // namespace nn
+ // namespace torch


+// Parsed from torch/nn/functional/embedding.h

// #pragma once

+// #include 

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native void _no_grad_embedding_renorm_(
+    @ByVal Tensor weight,
+    @Const @ByRef Tensor input,
+    float max_norm,
+    float norm_type);
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor embedding(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @ByVal LongOptional padding_idx,
+    @ByVal DoubleOptional max_norm,
+    double norm_type,
+    @Cast("bool") boolean scale_grad_by_freq,
+    @Cast("bool") boolean sparse);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::EmbeddingFuncOptions}
+/** class to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::embedding(input, weight,
+/** F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor embedding(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef(nullValue = "torch::nn::functional::EmbeddingFuncOptions{}") EmbeddingFuncOptions options);

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor embedding_bag(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor offsets,
+    @ByVal DoubleOptional max_norm,
+    double norm_type,
+    @Cast("bool") boolean scale_grad_by_freq,
+    @ByVal EmbeddingBagMode mode,
+    @Cast("bool") boolean sparse,
+    @Const @ByRef Tensor per_sample_weights,
+    @Cast("bool") boolean include_last_offset,
+    @ByVal LongOptional padding_idx);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding_bag
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::EmbeddingBagFuncOptions}
+/** class to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::embedding_bag(input, weight,
+/** F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets));
+/** }
+ */
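[Editor's note] A sketch of the two-argument embedding_bag overload declared just below; input holds the bag indices and weight the embedding matrix, with all optional arguments left at their defaults:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.embedding_bag;

    public class EmbeddingBagSketch {
        // Java counterpart of F::embedding_bag(input, weight) with default options.
        static Tensor pool(Tensor input, Tensor weight) {
            return embedding_bag(input, weight);
        }
    }
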
+@Namespace("torch::nn::functional") public static native @ByVal Tensor embedding_bag(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef(nullValue = "torch::nn::functional::EmbeddingBagFuncOptions{}") EmbeddingBagFuncOptions options);
+@Namespace("torch::nn::functional") public static native @ByVal Tensor embedding_bag(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight);
+ // namespace functional
+ // namespace nn
+ // namespace torch


+// Parsed from torch/nn/options/fold.h

// #pragma once

+// #include 
+// #include 
+// #include 
+// #include 
+// Targeting ../FoldOptions.java

+/** Options for {@code torch::nn::functional::fold}.
+ * 
+ *  See the documentation for {@code torch::nn::FoldOptions} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2}));
+ *  }
+ */
+// Targeting ../UnfoldOptions.java


+/** Options for {@code torch::nn::functional::unfold}.
+ * 
+ *  See the documentation for {@code torch::nn::UnfoldOptions} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2));
+ *  }
+ */
+ // namespace functional
+ // namespace nn
+ // namespace torch


+// Parsed from torch/nn/functional/fold.h

// #pragma once

+// #include 

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor fold(
+    @Const @ByRef Tensor input,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer dilation,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.fold
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::FoldFuncOptions} class to
+/** learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2}));
+/** }
+ */
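[Editor's note] fold takes the module-level FoldOptions under a @Cast to FoldFuncOptions, and unlike most functionals here its options argument has no default. A sketch that takes the options from the caller rather than guessing at their Java construction:

    import org.bytedeco.pytorch.FoldOptions;
    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.fold;

    public class FoldSketch {
        // Combines an array of sliding local blocks into one tensor,
        // as configured by the caller-supplied options.
        static Tensor foldBlocks(Tensor input, FoldOptions options) {
            return fold(input, options);
        }
    }
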
+@Namespace("torch::nn::functional") public static native @ByVal Tensor fold(@Const @ByRef Tensor input, @Cast("const torch::nn::functional::FoldFuncOptions*") @ByRef FoldOptions options);

+// ============================================================================

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor unfold(
+    @Const @ByRef Tensor input,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer dilation,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.unfold
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::UnfoldFuncOptions} class
+/** to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2));
+/** }
+ */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor unfold(@Const @ByRef Tensor input, @Cast("const torch::nn::functional::UnfoldFuncOptions*") @ByRef UnfoldOptions options);
+ // namespace functional
+ // namespace nn
+ // namespace torch


+// Parsed from torch/nn/options/instancenorm.h

// #pragma once

+// #include 
+// #include 
+// #include 
+// #include 
+// Targeting ../InstanceNormOptions.java


+/** Options for the {@code InstanceNorm1d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  InstanceNorm1d
+ *  model(InstanceNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+ *  }
+ */
+///

+/** Options for the {@code InstanceNorm2d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  InstanceNorm2d
+ *  model(InstanceNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+ *  }
+ */
+///

+/** Options for the {@code InstanceNorm3d} module.
+ * 
+ *  Example:
+ *  
{@code
+ *  InstanceNorm3d
+ *  model(InstanceNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+ *  }
+ */
+// Targeting ../InstanceNormFuncOptions.java

+ // namespace functional

+ // namespace nn
+ // namespace torch


+// Parsed from torch/nn/functional/instancenorm.h

// #pragma once

+// #include 

+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor instance_norm(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor running_mean,
+    @Const @ByRef Tensor running_var,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef Tensor bias,
+    @Cast("bool") boolean use_input_stats,
+    double momentum,
+    double eps);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */

+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.instance_norm
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::InstanceNormFuncOptions}
+/** class to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** 
{@code
+/** namespace F = torch::nn::functional;
+/** F::instance_norm(input,
+/** F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5));
+/** }
+ */
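[Editor's note] A sketch of the one-argument instance_norm overload declared just below, which falls back to the default InstanceNormFuncOptions (statistics are computed from the input); the linear and bilinear bindings that follow in this section are called the same way:

    import org.bytedeco.pytorch.Tensor;
    import static org.bytedeco.pytorch.global.torch.instance_norm;

    public class InstanceNormSketch {
        // Equivalent to F::instance_norm(input) in the C++ frontend.
        static Tensor normalize(Tensor input) {
            return instance_norm(input);
        }
    }
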
+@Namespace("torch::nn::functional") public static native @ByVal Tensor instance_norm(
+    @Const @ByRef Tensor input,
+    @Const @ByRef(nullValue = "torch::nn::functional::InstanceNormFuncOptions{}") InstanceNormFuncOptions options);
+@Namespace("torch::nn::functional") public static native @ByVal Tensor instance_norm(
+    @Const @ByRef Tensor input);
+ // namespace functional
+ // namespace nn
+ // namespace torch


+// Parsed from torch/nn/functional/linear.h

// #pragma once

+// #include 

+@Namespace("torch::nn::functional") public static native @ByVal Tensor bilinear(
+    @Const @ByRef Tensor input1,
+    @Const @ByRef Tensor input2,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef(nullValue = "torch::Tensor()") Tensor bias);

+// ============================================================================

+@Namespace("torch::nn::functional") public static native @ByVal Tensor linear(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor weight,
+    @Const @ByRef(nullValue = "torch::Tensor{}") Tensor bias);
+ // namespace functional
+ // namespace nn
+ // namespace torch


+// Parsed from torch/nn/options/activation.h

// #pragma once

+// #include 
+// #include 
+// #include 
+// #include 
+// Targeting ../ELUOptions.java


+/** Options for {@code torch::nn::functional::elu}.
+ * 
+ *  See the documentation for {@code torch::nn::ELUOptions} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true));
+ *  }
+ */
+// Targeting ../SELUOptions.java


+/** Options for {@code torch::nn::functional::selu}.
+ * 
+ *  See the documentation for {@code torch::nn::SELUOptions} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::selu(input, F::SELUFuncOptions(false));
+ *  }
+ */
+// Targeting ../GLUOptions.java


+/** Options for {@code torch::nn::functional::glu}.
+ * 
+ *  See the documentation for {@code torch::nn::GLUOptions} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::glu(input, GLUFuncOptions(1));
+ *  }
+ */
+// Targeting ../GELUOptions.java


+/** Options for {@code torch::nn::functional::gelu}.
+ * 
+ *  See the documentation for {@code torch::nn::GELUOptions} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::gelu(input, F::GELUFuncOptions().approximate("none"));
+ *  }
+ */
+// Targeting ../HardshrinkOptions.java


+/** Options for {@code torch::nn::functional::hardshrink}.
+ * 
+ *  See the documentation for {@code torch::nn::HardshrinkOptions} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42));
+ *  }
+ */
+// Targeting ../HardtanhOptions.java


+/** Options for {@code torch::nn::functional::hardtanh}.
+ * 
+ *  See the documentation for {@code torch::nn::HardtanhOptions} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::hardtanh(x,
+ *  F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true));
+ *  }
+ */
+// Targeting ../LeakyReLUOptions.java


+/** Options for {@code torch::nn::functional::leaky_relu}.
+ * 
+ *  See the documentation for {@code torch::nn::LeakyReLUOptions} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::leaky_relu(x,
+ *  F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true));
+ *  }
+ */
+// Targeting ../SoftmaxOptions.java


+// ============================================================================
+// Targeting ../SoftmaxFuncOptions.java


+// Targeting ../SoftminOptions.java


+// ============================================================================
+// Targeting ../SoftminFuncOptions.java


+// Targeting ../LogSoftmaxOptions.java


+// ============================================================================
+// Targeting ../LogSoftmaxFuncOptions.java


+// Targeting ../PReLUOptions.java


+// Targeting ../ReLUOptions.java


+/** Options for {@code torch::nn::functional::relu}.
+ * 
+ *  See the documentation for {@code torch::nn::ReLUOptions} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::relu(x, F::ReLUFuncOptions().inplace(true));
+ *  }
+ */
+// Targeting ../ReLU6Options.java


+/** Options for {@code torch::nn::functional::relu6}.
+ * 
+ *  See the documentation for {@code torch::nn::ReLU6Options} class to learn what
+ *  arguments are supported.
+ * 
+ *  Example:
+ *  
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::relu6(x, F::ReLU6FuncOptions().inplace(true));
+ *  }
*/ -/** Fills the given 2-dimensional {@code matrix} with values drawn from a uniform - * distribution parameterized by {@code low} and {@code high}. - * No gradient will be recorded for this operation. */ -@Namespace("torch::nn::init") public static native @ByVal Tensor uniform_(@ByVal Tensor tensor, double low/*=0*/, double high/*=1*/); -@Namespace("torch::nn::init") public static native @ByVal Tensor uniform_(@ByVal Tensor tensor); +// Targeting ../RReLUOptions.java -/** Fills the input {@code Tensor} with values according to the method - * described in "Delving deep into rectifiers: Surpassing human-level - * performance on ImageNet classification" - He, K. et al. (2015), using a - * normal distribution. Also known as He initialization. - * No gradient will be recorded for this operation. */ -@Namespace("torch::nn::init") public static native @ByVal Tensor kaiming_normal_( - @ByVal Tensor tensor, - double a/*=0*/, - @ByVal(nullValue = "torch::nn::init::FanModeType(torch::kFanIn)") FanModeType mode, - @ByVal(nullValue = "torch::nn::init::NonlinearityType(torch::kLeakyReLU)") NonlinearityType nonlinearity); -@Namespace("torch::nn::init") public static native @ByVal Tensor kaiming_normal_( - @ByVal Tensor tensor); -/** Fills the input {@code Tensor} with values according to the method - * described in "Delving deep into rectifiers: Surpassing human-level - * performance on ImageNet classification" - He, K. et al. (2015), using a - * uniform distribution. Also known as He initialization. - * No gradient will be recorded for this operation. */ -@Namespace("torch::nn::init") public static native @ByVal Tensor kaiming_uniform_( - @ByVal Tensor tensor, - double a/*=0*/, - @ByVal(nullValue = "torch::nn::init::FanModeType(torch::kFanIn)") FanModeType mode, - @ByVal(nullValue = "torch::nn::init::NonlinearityType(torch::kLeakyReLU)") NonlinearityType nonlinearity); -@Namespace("torch::nn::init") public static native @ByVal Tensor kaiming_uniform_( - @ByVal Tensor tensor); -/** Fills the input {@code Tensor} with values according to the method - * described in "Understanding the difficulty of training deep feedforward - * neural networks" - Glorot, X. & Bengio, Y. (2010). Values are scaled by the - * {@code gain} parameter. No gradient will be recorded for this operation. */ -@Namespace("torch::nn::init") public static native @ByVal Tensor xavier_normal_(@ByVal Tensor tensor, double gain/*=1.0*/); -@Namespace("torch::nn::init") public static native @ByVal Tensor xavier_normal_(@ByVal Tensor tensor); +// ============================================================================ +// Targeting ../RReLUFuncOptions.java -/** Fills the input {@code Tensor} with values according to the method - * described in "Understanding the difficulty of training deep feedforward - * neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform - * distribution. Values are scaled by the {@code gain} parameter - * No gradient will be recorded for this operation. */ -@Namespace("torch::nn::init") public static native @ByVal Tensor xavier_uniform_(@ByVal Tensor tensor, double gain/*=1.0*/); -@Namespace("torch::nn::init") public static native @ByVal Tensor xavier_uniform_(@ByVal Tensor tensor); -/** Fills the given {@code tensor} with zeros. - * No gradient will be recorded for this operation. 
*/ -@Namespace("torch::nn::init") public static native @ByVal Tensor zeros_(@ByVal Tensor tensor); -@Namespace("torch::nn::init") public static native @ByVal @Cast("std::tuple*") LongPointer _calculate_fan_in_and_fan_out( - @Const @ByRef Tensor tensor); - // namespace init - // namespace nn - // namespace torch +// Targeting ../CELUOptions.java -// Parsed from torch/nn/pimpl.h +/** Options for {@code torch::nn::functional::celu}. + * + * See the documentation for {@code torch::nn::CELUOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true));
+ *  }
*/ -// #pragma once +// Targeting ../SoftplusOptions.java -// #include -// #include -// #include -// #include -// #include +/** Options for {@code torch::nn::functional::softplus}. + * + * See the documentation for {@code torch::nn::SoftplusOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
+ *  }
*/ -// #include -// #include -// #include -// Dump all the template metaprogramming in this file. -// #include - // namespace detail -// Targeting ../ModuleHolder.java +// Targeting ../SoftshrinkOptions.java -// Targeting ../ModuleDictImplModuleHolder.java +/** Options for {@code torch::nn::functional::softshrink}. + * + * See the documentation for {@code torch::nn::SoftshrinkOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::softshrink(x, F::SoftshrinkFuncOptions(0.42));
+ *  }
*/ +// Targeting ../ThresholdOptions.java -// Targeting ../ModuleListImplModuleHolder.java +/** Options for {@code torch::nn::functional::threshold}. + * + * See the documentation for {@code torch::nn::ThresholdOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true));
+ *  }
*/ + // namespace functional -// Targeting ../SequentialImplModuleHolder.java +// ============================================================================ +// Targeting ../GumbelSoftmaxFuncOptions.java -// Targeting ../ParameterDictImplModuleHolder.java -// Targeting ../ParameterListImplModuleHolder.java +// Targeting ../MultiheadAttentionOptions.java -// Targeting ../AdaptiveLogSoftmaxWithLossImplModuleHolder.java +// ============================================================================ +// Targeting ../MultiheadAttentionForwardFuncOptions.java -// Targeting ../BatchNorm1dImplModuleHolder.java -// Targeting ../InstanceNorm1dImplModuleHolder.java + // namespace functional + // namespace nn + // namespace torch -// Targeting ../Conv1dImplModuleHolder.java +// Parsed from torch/nn/options/linear.h -// Targeting ../ConvTranspose1dImplModuleHolder.java +// #pragma once +// #include +// #include +// #include +// #include +// Targeting ../LinearOptions.java -// Targeting ../DropoutImplModuleHolder.java +// Targeting ../FlattenOptions.java -// Targeting ../BatchNorm2dImplModuleHolder.java +// Targeting ../UnflattenOptions.java -// Targeting ../InstanceNorm2dImplModuleHolder.java +// Targeting ../BilinearOptions.java -// Targeting ../Conv2dImplModuleHolder.java -// Targeting ../ConvTranspose2dImplModuleHolder.java + // namespace nn + // namespace torch -// Targeting ../Dropout2dImplModuleHolder.java +// Parsed from torch/nn/functional/activation.h +// #pragma once -// Targeting ../BatchNorm3dImplModuleHolder.java +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor elu(@ByVal Tensor input, double alpha, @Cast("bool") boolean inplace); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../InstanceNorm3dImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.elu +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::ELUFuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true));
+/** }
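+/**
+/** In the generated Java API the options argument carries a
+/** {@code nullValue}, so the defaults can be requested by passing
+/** {@code null}; a hedged sketch (setter chaining assumed from the
+/** generated {@code ELUOptions} class):
+/** {@code
+/** Tensor y1 = elu(x, new ELUOptions().alpha(0.42).inplace(true));
+/** Tensor y2 = elu(x, null); // falls back to ELUFuncOptions{}
+/** }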
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor elu(@ByVal Tensor input, @Cast("const torch::nn::functional::ELUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::ELUFuncOptions{}") ELUOptions options); +// ============================================================================ -// Targeting ../Conv3dImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor selu(@ByVal Tensor input, @Cast("bool") boolean inplace); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.selu +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::SELUFuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::selu(input, F::SELUFuncOptions(false));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor selu(@ByVal Tensor input, @Cast("const torch::nn::functional::SELUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SELUFuncOptions{}") SELUOptions options); -// Targeting ../ConvTranspose3dImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor hardshrink(@Const @ByRef Tensor input, double lambda); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../Dropout3dImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardshrink +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::HardshrinkFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor hardshrink( + @Const @ByRef Tensor input, + @Cast("const torch::nn::functional::HardshrinkFuncOptions*") @ByRef(nullValue = "torch::nn::functional::HardshrinkFuncOptions{}") HardshrinkOptions options); +// ============================================================================ -// Targeting ../AlphaDropoutImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor hardtanh( + @ByVal Tensor input, + double min_val, + double max_val, + @Cast("bool") boolean inplace); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardtanh +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::HardtanhFuncOptions} class +/** to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::hardtanh(x,
+/** F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor hardtanh(@ByVal Tensor input, @Cast("const torch::nn::functional::HardtanhFuncOptions*") @ByRef(nullValue = "torch::nn::functional::HardtanhFuncOptions{}") HardtanhOptions options); -// Targeting ../FeatureAlphaDropoutImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor leaky_relu(@ByVal Tensor input, double negative_slope, @Cast("bool") boolean inplace); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../CosineSimilarityImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.leaky_relu +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::LeakyReLUFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::leaky_relu(x,
+/** F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor leaky_relu( + @ByVal Tensor input, + @Cast("const torch::nn::functional::LeakyReLUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::LeakyReLUFuncOptions{}") LeakyReLUOptions options); +// ============================================================================ -// Targeting ../PairwiseDistanceImplModuleHolder.java +@Namespace("torch::nn::functional") public static native @ByVal Tensor logsigmoid(@Const @ByRef Tensor input); +// ============================================================================ -// Targeting ../EmbeddingImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor gumbel_softmax( + @Const @ByRef Tensor logits, + double tau, + @Cast("bool") boolean hard, + int dim); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.gumbel_softmax +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::GumbelSoftmaxFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1));
+/** }
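+/**
+/** Both generated overloads are usable from Java; a rough sketch (names
+/** taken from this header, option defaults assumed from the C++ API):
+/** {@code
+/** Tensor hard = gumbel_softmax(logits,
+/**     new GumbelSoftmaxFuncOptions().hard(true).dim(-1));
+/** Tensor soft = gumbel_softmax(logits); // tau=1, hard=false, dim=-1
+/** }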
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor gumbel_softmax( + @Const @ByRef Tensor logits, + @Const @ByRef(nullValue = "torch::nn::functional::GumbelSoftmaxFuncOptions{}") GumbelSoftmaxFuncOptions options); +@Namespace("torch::nn::functional") public static native @ByVal Tensor gumbel_softmax( + @Const @ByRef Tensor logits); -// Targeting ../EmbeddingBagImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../FoldImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmax +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::SoftmaxFuncOptions} class +/** to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::softmax(input, F::SoftmaxFuncOptions(1));
+/** }
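+/**
+/** Note that this binding takes {@code SoftmaxFuncOptions} by const
+/** reference with no default, so the options must always be supplied; a
+/** hedged Java sketch (the {@code long}-dim constructor is assumed from
+/** the C++ signature):
+/** {@code
+/** Tensor probs = softmax(input, new SoftmaxFuncOptions(1));
+/** }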
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor softmax(@Const @ByRef Tensor input, @Const @ByRef SoftmaxFuncOptions options); +// ============================================================================ -// Targeting ../UnfoldImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor softmin( + @Const @ByRef Tensor input, + @Cast("int64_t") long dim, + @ByVal ScalarTypeOptional dtype); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmin +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::SoftminFuncOptions} class +/** to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::softmin(input, F::SoftminFuncOptions(1));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor softmin(@Const @ByRef Tensor input, @Const @ByRef SoftminFuncOptions options); -// Targeting ../IdentityImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../LinearImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.log_softmax +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::LogSoftmaxFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::log_softmax(input, F::LogSoftmaxFuncOptions(1));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor log_softmax( + @Const @ByRef Tensor input, + @Const @ByRef LogSoftmaxFuncOptions options); +// ============================================================================ -// Targeting ../BilinearImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.glu +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::GLUFuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::glu(input, F::GLUFuncOptions(1));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor glu(@Const @ByRef Tensor input, @Cast("const torch::nn::functional::GLUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::GLUFuncOptions{}") GLUOptions options); -// Targeting ../FlattenImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor gelu(@Const @ByRef Tensor input, @StdString BytePointer approximate); +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor gelu(@Const @ByRef Tensor input, @StdString String approximate); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../UnflattenImplModuleHolder.java +@Namespace("torch::nn::functional") public static native @ByVal Tensor gelu(@Const @ByRef Tensor input, @Cast("const torch::nn::functional::GELUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::GELUFuncOptions{}") GELUOptions options); +// ============================================================================ -// Targeting ../L1LossImplModuleHolder.java +// ============================================================================ +// ============================================================================ -// Targeting ../KLDivLossImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor relu(@ByVal Tensor input, @Cast("bool") boolean inplace); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../MSELossImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::ReLUFuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::relu(x, F::ReLUFuncOptions().inplace(true));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor relu(@ByVal Tensor input, @Cast("const torch::nn::functional::ReLUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::ReLUFuncOptions{}") ReLUOptions options); +// ============================================================================ -// Targeting ../BCELossImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor relu6(@ByVal Tensor input, @Cast("bool") boolean inplace); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu6 +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::ReLU6FuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::relu6(x, F::ReLU6FuncOptions().inplace(true));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor relu6(@ByVal Tensor input, @Cast("const torch::nn::functional::ReLU6FuncOptions*") @ByRef(nullValue = "torch::nn::functional::ReLU6FuncOptions{}") ReLU6Options options); -// Targeting ../HingeEmbeddingLossImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor rrelu( + @ByVal Tensor input, + double lower, + double upper, + @Cast("bool") boolean training, + @Cast("bool") boolean inplace); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../MultiMarginLossImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.rrelu +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::RReLUFuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true));
+/** }
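+/**
+/** A rough Java counterpart (hedged; the {@code lower}, {@code upper} and
+/** {@code inplace} setters are assumed to chain as their C++ TORCH_ARG
+/** equivalents do):
+/** {@code
+/** Tensor y = rrelu(x,
+/**     new RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true));
+/** }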
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor rrelu(@ByVal Tensor input, @Const @ByRef(nullValue = "torch::nn::functional::RReLUFuncOptions{}") RReLUFuncOptions options); +// ============================================================================ -// Targeting ../CosineEmbeddingLossImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor celu(@ByVal Tensor input, double alpha, @Cast("bool") boolean inplace); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.celu +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::CELUFuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor celu(@ByVal Tensor input, @Cast("const torch::nn::functional::CELUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::CELUFuncOptions{}") CELUOptions options); -// Targeting ../SmoothL1LossImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor softplus(@Const @ByRef Tensor input, double beta, double threshold); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../HuberLossImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softplus +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::SoftplusFuncOptions} class +/** to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor softplus( + @Const @ByRef Tensor input, + @Cast("const torch::nn::functional::SoftplusFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SoftplusFuncOptions{}") SoftplusOptions options); +// ============================================================================ -// Targeting ../MultiLabelMarginLossImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor softshrink(@Const @ByRef Tensor input, double lambda); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softshrink +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::SoftshrinkFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::softshrink(x, F::SoftshrinkFuncOptions(0.42));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor softshrink( + @Const @ByRef Tensor input, + @Cast("const torch::nn::functional::SoftshrinkFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SoftshrinkFuncOptions{}") SoftshrinkOptions options); -// Targeting ../SoftMarginLossImplModuleHolder.java +// ============================================================================ +@Namespace("torch::nn::functional") public static native @ByVal Tensor softsign(@Const @ByRef Tensor input); -// Targeting ../MultiLabelSoftMarginLossImplModuleHolder.java +// ============================================================================ +@Namespace("torch::nn::functional") public static native @ByVal Tensor tanhshrink(@Const @ByRef Tensor input); -// Targeting ../TripletMarginLossImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor threshold( + @ByVal Tensor input, + double threshold, + double value, + @Cast("bool") boolean inplace); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../TripletMarginWithDistanceLossImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.threshold +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::ThresholdFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true));
+/** }
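+/**
+/** Unlike most functionals in this header, the generated Java
+/** {@code threshold} takes its options without a default value, so they
+/** must always be passed; a hedged sketch (the two-argument constructor is
+/** assumed from the required C++ arguments):
+/** {@code
+/** Tensor y = threshold(x, new ThresholdOptions(0.5, 0.5).inplace(true));
+/** }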
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor threshold(@ByVal Tensor input, @Cast("const torch::nn::functional::ThresholdFuncOptions*") @ByRef ThresholdOptions options); +// ============================================================================ -// Targeting ../CTCLossImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal T_TensorTensor_T multi_head_attention_forward( + @Const @ByRef Tensor query, + @Const @ByRef Tensor key, + @Const @ByRef Tensor value, + @Cast("int64_t") long embed_dim_to_check, + @Cast("int64_t") long num_heads, + @Const @ByRef Tensor in_proj_weight, + @Const @ByRef Tensor in_proj_bias, + @Const @ByRef Tensor bias_k, + @Const @ByRef Tensor bias_v, + @Cast("bool") boolean add_zero_attn, + double dropout_p, + @Const @ByRef Tensor out_proj_weight, + @Const @ByRef Tensor out_proj_bias, + @Cast("bool") boolean training/*=true*/, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor key_padding_mask, + @Cast("bool") boolean need_weights/*=true*/, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor attn_mask, + @Cast("bool") boolean use_separate_proj_weight/*=false*/, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor q_proj_weight, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor k_proj_weight, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor v_proj_weight, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor static_k, + @Const @ByRef(nullValue = "torch::Tensor{}") Tensor static_v, + @Cast("bool") boolean average_attn_weights/*=true*/); +@Namespace("torch::nn::functional::detail") public static native @ByVal T_TensorTensor_T multi_head_attention_forward( + @Const @ByRef Tensor query, + @Const @ByRef Tensor key, + @Const @ByRef Tensor value, + @Cast("int64_t") long embed_dim_to_check, + @Cast("int64_t") long num_heads, + @Const @ByRef Tensor in_proj_weight, + @Const @ByRef Tensor in_proj_bias, + @Const @ByRef Tensor bias_k, + @Const @ByRef Tensor bias_v, + @Cast("bool") boolean add_zero_attn, + double dropout_p, + @Const @ByRef Tensor out_proj_weight, + @Const @ByRef Tensor out_proj_bias); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +@Namespace("torch::nn::functional") public static native @ByVal T_TensorTensor_T multi_head_attention_forward( + @Const @ByRef Tensor query, + @Const @ByRef Tensor key, + @Const @ByRef Tensor value, + @Const @ByRef MultiheadAttentionForwardFuncOptions options); -// Targeting ../PoissonNLLLossImplModuleHolder.java + // namespace functional + // namespace nn + // namespace torch -// Targeting ../MarginRankingLossImplModuleHolder.java +// Parsed from torch/nn/options/loss.h +// #pragma once -// Targeting ../NLLLossImplModuleHolder.java +// #include +// #include +// #include +// #include +// Targeting ../L1LossOptions.java -// Targeting ../CrossEntropyLossImplModuleHolder.java +/** Options for {@code torch::nn::functional::l1_loss}. + * + * See the documentation for {@code torch::nn::L1LossOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone));
+ *  }
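+ *
+ *  From Java the reduction is supplied through the corresponding options
+ *  class; a hedged sketch (the {@code kNone} tag class and the
+ *  variant-taking constructor are assumptions about the generated API):
+ *  {@code
+ *  Tensor loss = l1_loss(input, target,
+ *      new L1LossOptions(new LossReduction(new kNone())));
+ *  }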
*/ +// Targeting ../KLDivLossOptions.java -// Targeting ../BCEWithLogitsLossImplModuleHolder.java +/** Options for {@code torch::nn::functional::kl_div}. + * + * See the documentation for {@code torch::nn::KLDivLossOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::kl_div(input, target,
+ *  F::KLDivFuncOptions().reduction(torch::kNone).log_target(false));
+ *  }
*/ -// Targeting ../ReflectionPad1dImplModuleHolder.java +// Targeting ../MSELossOptions.java -// Targeting ../ReplicationPad1dImplModuleHolder.java +/** Options for {@code torch::nn::functional::mse_loss}. + * + * See the documentation for {@code torch::nn::MSELossOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone));
+ *  }
*/ +// Targeting ../BCELossOptions.java -// Targeting ../ConstantPad1dImplModuleHolder.java +/** Options for {@code torch::nn::functional::binary_cross_entropy}. + * + * See the documentation for {@code torch::nn::BCELossOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::binary_cross_entropy(input, target,
+ *  F::BinaryCrossEntropyFuncOptions().weight(weight));
+ *  }
*/ -// Targeting ../AvgPool1dImplModuleHolder.java +// Targeting ../HingeEmbeddingLossOptions.java -// Targeting ../MaxPool1dImplModuleHolder.java +/** Options for {@code torch::nn::functional::hinge_embedding_loss}. + * + * See the documentation for {@code torch::nn::HingeEmbeddingLossOptions} class to + * learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::hinge_embedding_loss(input, target,
+ *  F::HingeEmbeddingLossFuncOptions().margin(2));
+ *  }
*/ +// Targeting ../MultiMarginLossOptions.java -// Targeting ../AdaptiveAvgPool1dImplModuleHolder.java +/** Options for {@code torch::nn::functional::multi_margin_loss}. + * + * See the documentation for {@code torch::nn::MultiMarginLossOptions} class to learn + * what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::multi_margin_loss(input, target,
+ *  F::MultiMarginLossFuncOptions().margin(2).weight(weight));
+ *  }
*/ -// Targeting ../AdaptiveMaxPool1dImplModuleHolder.java +// Targeting ../CosineEmbeddingLossOptions.java -// Targeting ../MaxUnpool1dImplModuleHolder.java +/** Options for {@code torch::nn::functional::cosine_embedding_loss}. + * + * See the documentation for {@code torch::nn::CosineEmbeddingLossOptions} class to + * learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::cosine_embedding_loss(input1, input2, target,
+ *  F::CosineEmbeddingLossFuncOptions().margin(0.5));
+ *  }
*/ +// Targeting ../MultiLabelMarginLossOptions.java -// Targeting ../LPPool1dImplModuleHolder.java +/** Options for {@code torch::nn::functional::multilabel_margin_loss}. + * + * See the documentation for {@code torch::nn::MultiLabelMarginLossOptions} class to + * learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::multilabel_margin_loss(input, target,
+ *  F::MultilabelMarginLossFuncOptions(torch::kNone));
+ *  }
*/ -// Targeting ../ReflectionPad2dImplModuleHolder.java +// Targeting ../SoftMarginLossOptions.java -// Targeting ../ReplicationPad2dImplModuleHolder.java +/** Options for {@code torch::nn::functional::soft_margin_loss}. + * + * See the documentation for {@code torch::nn::SoftMarginLossOptions} class to learn + * what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::soft_margin_loss(input, target,
+ *  F::SoftMarginLossFuncOptions(torch::kNone));
+ *  }
*/ +// Targeting ../MultiLabelSoftMarginLossOptions.java -// Targeting ../ConstantPad2dImplModuleHolder.java +/** Options for {@code torch::nn::functional::multilabel_soft_margin_loss}. + * + * See the documentation for {@code torch::nn::MultiLabelSoftMarginLossOptions} class + * to learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::multilabel_soft_margin_loss(input, target,
+ *  F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight));
+ *  }
*/ -// Targeting ../ZeroPad2dImplModuleHolder.java +// Targeting ../TripletMarginLossOptions.java -// Targeting ../AvgPool2dImplModuleHolder.java +/** Options for {@code torch::nn::functional::triplet_margin_loss}. + * + * See the documentation for {@code torch::nn::TripletMarginLossOptions} class to + * learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::triplet_margin_loss(anchor, positive, negative,
+ *  F::TripletMarginLossFuncOptions().margin(1.0));
+ *  }
*/ +// Targeting ../TripletMarginWithDistanceLossOptions.java -// Targeting ../MaxPool2dImplModuleHolder.java +/** Options for {@code torch::nn::functional::triplet_margin_with_distance_loss}. + * + * See the documentation for {@code torch::nn::TripletMarginWithDistanceLossOptions} + * class to learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::triplet_margin_with_distance_loss(anchor, positive, negative,
+ *  F::TripletMarginWithDistanceLossFuncOptions().margin(1.0));
+ *  }
*/ -// Targeting ../AdaptiveAvgPool2dImplModuleHolder.java +// Targeting ../CTCLossOptions.java -// Targeting ../AdaptiveMaxPool2dImplModuleHolder.java +/** Options for {@code torch::nn::functional::ctc_loss}. + * + * See the documentation for {@code torch::nn::CTCLossOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
+ *  F::CTCLossFuncOptions().reduction(torch::kNone));
+ *  }
*/ +// Targeting ../SmoothL1LossOptions.java -// Targeting ../MaxUnpool2dImplModuleHolder.java +/** Options for {@code torch::nn::functional::smooth_l1_loss}. + * + * See the documentation for {@code torch::nn::SmoothL1LossOptions} class to learn + * what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone));
+ *  }
*/ -// Targeting ../FractionalMaxPool2dImplModuleHolder.java +// Targeting ../HuberLossOptions.java -// Targeting ../LPPool2dImplModuleHolder.java +/** Options for {@code torch::nn::functional::huber_loss}. + * + * See the documentation for {@code torch::nn::HuberLossOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::huber_loss(input, target, F::HuberLossFuncOptions(torch::kNone));
+ *  }
*/ +// Targeting ../PoissonNLLLossOptions.java -// Targeting ../ReflectionPad3dImplModuleHolder.java +/** Options for {@code torch::nn::functional::poisson_nll_loss}. + * + * See the documentation for {@code torch::nn::PoissonNLLLossOptions} class to learn + * what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::poisson_nll_loss(input, target,
+ *  F::PoissonNLLLossFuncOptions().reduction(torch::kNone));
+ *  }
*/ -// Targeting ../ReplicationPad3dImplModuleHolder.java +// Targeting ../MarginRankingLossOptions.java -// Targeting ../ConstantPad3dImplModuleHolder.java +/** Options for {@code torch::nn::functional::margin_ranking_loss}. + * + * See the documentation for {@code torch::nn::MarginRankingLossOptions} class to + * learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::margin_ranking_loss(input1, input2, target,
+ *  F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
+ *  }
*/ +// Targeting ../NLLLossOptions.java -// Targeting ../AvgPool3dImplModuleHolder.java +/** Options for {@code torch::nn::functional::nll_loss}. + * + * See the documentation for {@code torch::nn::NLLLossOptions} class to learn what + * arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::nll_loss(input, target,
+ *  F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean));
+ *  }
*/ -// Targeting ../MaxPool3dImplModuleHolder.java +// Targeting ../CrossEntropyLossOptions.java -// Targeting ../AdaptiveAvgPool3dImplModuleHolder.java +/** Options for {@code torch::nn::functional::cross_entropy}. + * + * See the documentation for {@code torch::nn::CrossEntropyLossOptions} class to + * learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::cross_entropy(input, target,
+ *  F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
+ *  }
*/ +// Targeting ../BCEWithLogitsLossOptions.java -// Targeting ../AdaptiveMaxPool3dImplModuleHolder.java +/** Options for {@code torch::nn::functional::binary_cross_entropy_with_logits}. + * + * See the documentation for {@code torch::nn::BCEWithLogitsLossOptions} class to + * learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::binary_cross_entropy_with_logits(input, target,
+ *  F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum));
+ *  }
*/ + // namespace functional -// Targeting ../MaxUnpool3dImplModuleHolder.java + // namespace nn + // namespace torch -// Targeting ../FractionalMaxPool3dImplModuleHolder.java +// Parsed from torch/nn/functional/loss.h +// #pragma once -// Targeting ../RNNImplModuleHolder.java +// #include +// #include +// #include +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor l1_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../LSTMImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.l1_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::L1LossFuncOptions} class +/** to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor l1_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::L1LossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::L1LossFuncOptions{}") L1LossOptions options); +// ============================================================================ -// Targeting ../GRUImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor kl_div( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @ByVal KLDivLossReduction reduction, + @Cast("bool") boolean log_target/*=false*/); +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor kl_div( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @ByVal KLDivLossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.kl_div +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::KLDivFuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::kl_div(input, target,
+/** F::KLDivFuncOptions().reduction(torch::kNone).log_target(false));
+/** }
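+/**
+/** A rough Java counterpart (hedged; the {@code KLDivLossReduction} variant
+/** wrapper and the chained setters are assumptions about the generated
+/** API):
+/** {@code
+/** Tensor out = kl_div(input, target,
+/**     new KLDivLossOptions().reduction(new KLDivLossReduction(new kNone()))
+/**                           .log_target(false));
+/** }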
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor kl_div( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::KLDivFuncOptions*") @ByRef(nullValue = "torch::nn::functional::KLDivFuncOptions{}") KLDivLossOptions options); -// Targeting ../RNNCellImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor mse_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../LSTMCellImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.mse_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::MSELossFuncOptions} class +/** to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor mse_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::MSELossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::MSELossFuncOptions{}") MSELossOptions options); +// ============================================================================ -// Targeting ../GRUCellImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor binary_cross_entropy( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Const @ByRef Tensor weight, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::BinaryCrossEntropyFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::binary_cross_entropy(input, target,
+/** F::BinaryCrossEntropyFuncOptions().weight(weight));
+/** }
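+/**
+/** In Java, with {@code weight} an existing {@code Tensor} (a hedged
+/** sketch; the {@code weight} setter is assumed from the C++ options
+/** class):
+/** {@code
+/** Tensor out = binary_cross_entropy(input, target,
+/**     new BCELossOptions().weight(weight));
+/** }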
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor binary_cross_entropy( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::BinaryCrossEntropyFuncOptions*") @ByRef(nullValue = "torch::nn::functional::BinaryCrossEntropyFuncOptions{}") BCELossOptions options); -// Targeting ../PixelShuffleImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor hinge_embedding_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + double margin, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../PixelUnshuffleImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hinge_embedding_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::HingeEmbeddingLossFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::hinge_embedding_loss(input, target,
+/** F::HingeEmbeddingLossFuncOptions().margin(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor hinge_embedding_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::HingeEmbeddingLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::HingeEmbeddingLossFuncOptions{}") HingeEmbeddingLossOptions options); +// ============================================================================ -// Targeting ../UpsampleImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor multi_margin_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("int64_t") long p, + double margin, + @Const @ByRef Tensor weight, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multi_margin_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::MultiMarginLossFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::multi_margin_loss(input, target,
+/** F::MultiMarginLossFuncOptions().margin(2).weight(weight));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor multi_margin_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::MultiMarginLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::MultiMarginLossFuncOptions{}") MultiMarginLossOptions options); -// Targeting ../ELUImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor cosine_embedding_loss( + @Const @ByRef Tensor input1, + @Const @ByRef Tensor input2, + @Const @ByRef Tensor target, + double margin, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../SELUImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_embedding_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::CosineEmbeddingLossFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::cosine_embedding_loss(input1, input2, target,
+/** F::CosineEmbeddingLossFuncOptions().margin(0.5));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor cosine_embedding_loss( + @Const @ByRef Tensor input1, + @Const @ByRef Tensor input2, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::CosineEmbeddingLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::CosineEmbeddingLossFuncOptions{}") CosineEmbeddingLossOptions options); +// ============================================================================ -// Targeting ../HardshrinkImplModuleHolder.java +@Namespace("torch::nn::functional") public static native @ByVal Tensor _smooth_l1_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + double beta/*=1.*/); +@Namespace("torch::nn::functional") public static native @ByVal Tensor _smooth_l1_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target); +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor smooth_l1_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @ByVal LossReduction reduction, + double beta/*=1.*/); +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor smooth_l1_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../HardtanhImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::SmoothL1LossFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor smooth_l1_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::SmoothL1LossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SmoothL1LossFuncOptions{}") SmoothL1LossOptions options, + double beta/*=1.*/); +// ============================================================================ -// Targeting ../LeakyReLUImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor huber_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @ByVal LossReduction reduction, + double delta/*=1.*/); +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor huber_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.huber_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::HuberLossFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::huber_loss(input, target,
+/** F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5));
+/** }
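The same call fleshed out in C++; delta is the threshold where the loss switches from quadratic to linear, and all values here are illustrative:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      auto input  = torch::randn({3, 5});
      auto target = torch::randn({3, 5});
      auto loss = F::huber_loss(
          input, target,
          F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5));
    }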
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor huber_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::HuberLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::HuberLossFuncOptions{}") HuberLossOptions options); -// Targeting ../LogSigmoidImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor multilabel_margin_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../SoftmaxImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_margin_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::MultilabelMarginLossFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::multilabel_margin_loss(input, target,
+/** F::MultilabelMarginLossFuncOptions(torch::kNone));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor multilabel_margin_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::MultilabelMarginLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::MultilabelMarginLossFuncOptions{}") MultiLabelMarginLossOptions options); +// ============================================================================ -// Targeting ../SoftminImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor soft_margin_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.soft_margin_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::SoftMarginLossFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::soft_margin_loss(input, target,
+/** F::SoftMarginLossFuncOptions(torch::kNone));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor soft_margin_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::SoftMarginLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SoftMarginLossFuncOptions{}") SoftMarginLossOptions options); -// Targeting ../LogSoftmaxImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor multilabel_soft_margin_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Const @ByRef Tensor weight, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../Softmax2dImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_soft_margin_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::MultilabelSoftMarginLossFuncOptions} class to learn +/** what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::multilabel_soft_margin_loss(input, target,
+/** F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor multilabel_soft_margin_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::MultilabelSoftMarginLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::MultilabelSoftMarginLossFuncOptions{}") MultiLabelSoftMarginLossOptions options); +@Namespace("torch::nn::functional") public static native @ByVal Tensor multilabel_soft_margin_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target); +// ============================================================================ -// Targeting ../PReLUImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor triplet_margin_loss( + @Const @ByRef Tensor anchor, + @Const @ByRef Tensor positive, + @Const @ByRef Tensor negative, + double margin, + double p, + double eps, + @Cast("bool") boolean swap, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::TripletMarginLossFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::triplet_margin_loss(anchor, positive, negative,
+/** F::TripletMarginLossFuncOptions().margin(1.0));
+/** }
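A self-contained C++ sketch with arbitrary embedding shapes; anchor, positive and negative must share one shape:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      auto anchor   = torch::randn({16, 128});  // 16 triplets of 128-d embeddings
      auto positive = torch::randn({16, 128});
      auto negative = torch::randn({16, 128});
      auto loss = F::triplet_margin_loss(
          anchor, positive, negative,
          F::TripletMarginLossFuncOptions().margin(1.0));
    }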
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor triplet_margin_loss( + @Const @ByRef Tensor anchor, + @Const @ByRef Tensor positive, + @Const @ByRef Tensor negative, + @Cast("const torch::nn::functional::TripletMarginLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::TripletMarginLossFuncOptions{}") TripletMarginLossOptions options); -// Targeting ../ReLUImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor triplet_margin_with_distance_loss( + @Const @ByRef Tensor anchor, + @Const @ByRef Tensor positive, + @Const @ByRef Tensor negative, + @ByVal @Cast("c10::optional*") Pointer distance_function, + double margin, + @Cast("bool") boolean swap, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../ReLU6ImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_with_distance_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::TripletMarginWithDistanceLossFuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::triplet_margin_with_distance_loss(anchor, positive, negative,
+/** F::TripletMarginWithDistanceLossFuncOptions().margin(1.0));
+/** }
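A sketch of passing a custom distance callable, the main reason to prefer this variant over triplet_margin_loss. It assumes the options class exposes a distance_function setter for the c10::optional<std::function<Tensor(const Tensor&, const Tensor&)>> field that the detail overload below takes:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      auto anchor   = torch::randn({16, 128});
      auto positive = torch::randn({16, 128});
      auto negative = torch::randn({16, 128});
      // cosine distance instead of the default pairwise (L2) distance
      auto dist = [](const torch::Tensor& a, const torch::Tensor& b) {
        return 1 - F::cosine_similarity(a, b, F::CosineSimilarityFuncOptions().dim(1));
      };
      auto loss = F::triplet_margin_with_distance_loss(
          anchor, positive, negative,
          F::TripletMarginWithDistanceLossFuncOptions()
              .distance_function(dist)
              .margin(1.0));
    }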
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor triplet_margin_with_distance_loss( + @Const @ByRef Tensor anchor, + @Const @ByRef Tensor positive, + @Const @ByRef Tensor negative, + @Cast("const torch::nn::functional::TripletMarginWithDistanceLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::TripletMarginWithDistanceLossFuncOptions{}") TripletMarginWithDistanceLossOptions options); +@Namespace("torch::nn::functional") public static native @ByVal Tensor triplet_margin_with_distance_loss( + @Const @ByRef Tensor anchor, + @Const @ByRef Tensor positive, + @Const @ByRef Tensor negative); +// ============================================================================ -// Targeting ../RReLUImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor ctc_loss( + @Const @ByRef Tensor log_probs, + @Const @ByRef Tensor targets, + @Const @ByRef Tensor input_lengths, + @Const @ByRef Tensor target_lengths, + @Cast("int64_t") long blank, + @ByVal LossReduction reduction, + @Cast("bool") boolean zero_infinity); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.ctc_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::CTCLossFuncOptions} class +/** to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
+/** F::CTCLossFuncOptions().reduction(torch::kNone));
+/** }
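A self-contained C++ sketch spelling out the tensor shapes ctc_loss expects; every size below is arbitrary:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      int64_t T = 50, N = 4, C = 20, S = 10;  // time, batch, classes, target length
      auto log_probs = torch::randn({T, N, C}).log_softmax(/*dim=*/2);
      auto targets = torch::randint(1, C, {N, S});           // 0 is the blank label
      auto input_lengths  = torch::full({N}, T, torch::kLong);
      auto target_lengths = torch::randint(1, S + 1, {N});
      auto loss = F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
                              F::CTCLossFuncOptions().reduction(torch::kMean));
    }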
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor ctc_loss( + @Const @ByRef Tensor log_probs, + @Const @ByRef Tensor targets, + @Const @ByRef Tensor input_lengths, + @Const @ByRef Tensor target_lengths, + @Cast("const torch::nn::functional::CTCLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::CTCLossFuncOptions{}") CTCLossOptions options); -// Targeting ../CELUImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor poisson_nll_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("bool") boolean log_input, + @Cast("bool") boolean full, + double eps, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../GLUImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.poisson_nll_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::PoissonNLLLossFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::poisson_nll_loss(input, target,
+/** F::PoissonNLLLossFuncOptions().reduction(torch::kNone));
+/** }
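A short C++ illustration; by default log_input=true, i.e. the first argument is interpreted as log-rates, and the target holds non-negative counts:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      auto log_rate = torch::randn({4, 6});
      auto target   = torch::poisson(torch::rand({4, 6}) * 5);  // sampled counts
      auto loss = F::poisson_nll_loss(
          log_rate, target,
          F::PoissonNLLLossFuncOptions().reduction(torch::kMean));
    }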
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor poisson_nll_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::PoissonNLLLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::PoissonNLLLossFuncOptions{}") PoissonNLLLossOptions options); +@Namespace("torch::nn::functional") public static native @ByVal Tensor poisson_nll_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target); +// ============================================================================ -// Targeting ../GELUImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor margin_ranking_loss( + @Const @ByRef Tensor input1, + @Const @ByRef Tensor input2, + @Const @ByRef Tensor target, + double margin, + @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.margin_ranking_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::MarginRankingLossFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::margin_ranking_loss(input1, input2, target,
+/** F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
+/** }
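The documented call with illustrative inputs; input1 and input2 are two sets of scores and target selects, per pair, which of the two should rank higher:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      auto input1 = torch::randn({8});
      auto input2 = torch::randn({8});
      auto target = torch::sign(torch::randn({8}));  // +1: input1 ranks higher, -1: input2
      auto loss = F::margin_ranking_loss(
          input1, input2, target,
          F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
    }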
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor margin_ranking_loss( + @Const @ByRef Tensor input1, + @Const @ByRef Tensor input2, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::MarginRankingLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::MarginRankingLossFuncOptions{}") MarginRankingLossOptions options); -// Targeting ../SiLUImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor nll_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Const @ByRef Tensor weight, + @Cast("int64_t") long ignore_index, + @Const @ByVal LossReduction reduction); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../MishImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.nll_loss +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::NLLLossFuncOptions} class +/** to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::nll_loss(input, target,
+/** F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean));
+/** }
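For reference, the same call in C++ with the log-probability input that nll_loss expects; shapes are illustrative:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      auto input  = torch::randn({3, 5}).log_softmax(/*dim=*/1);  // (N, C) log-probs
      auto target = torch::tensor({1, 0, 4});                     // class index per sample
      auto loss = F::nll_loss(input, target,
                              F::NLLLossFuncOptions().ignore_index(-100));
    }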
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor nll_loss( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::NLLLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::NLLLossFuncOptions{}") NLLLossOptions options); +// ============================================================================ -// Targeting ../SigmoidImplModuleHolder.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor cross_entropy( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Const @ByRef Tensor weight, + @Cast("int64_t") long ignore_index, + @ByVal LossReduction reduction, + double label_smoothing); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cross_entropy +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::CrossEntropyFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::cross_entropy(input, target,
+/** F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
+/** }
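The same call on raw logits in C++; cross_entropy fuses log_softmax and nll_loss, so the input needs no prior normalization:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      auto logits = torch::randn({3, 5});      // (N, C), unnormalized scores
      auto target = torch::tensor({1, 0, 4});  // class indices, dtype kLong
      auto loss = F::cross_entropy(
          logits, target,
          F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
    }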
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor cross_entropy( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::CrossEntropyFuncOptions*") @ByRef(nullValue = "torch::nn::functional::CrossEntropyFuncOptions{}") CrossEntropyLossOptions options); +@Namespace("torch::nn::functional") public static native @ByVal Tensor cross_entropy( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target); -// Targeting ../SoftplusImplModuleHolder.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor binary_cross_entropy_with_logits( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Const @ByRef Tensor weight, + @ByVal LossReduction reduction, + @Const @ByRef Tensor pos_weight); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../SoftshrinkImplModuleHolder.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy_with_logits +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::binary_cross_entropy_with_logits(input, target,
+/** F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum));
+/** }
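A C++ sketch with per-class positive weighting; the shapes and the factor 2 are arbitrary:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      auto logits = torch::randn({4, 3});      // 4 samples, 3 independent labels
      auto target = torch::rand({4, 3});       // soft targets in [0, 1]
      auto pos_weight = torch::ones({3}) * 2;  // up-weight positives in every class
      auto loss = F::binary_cross_entropy_with_logits(
          logits, target,
          F::BinaryCrossEntropyWithLogitsFuncOptions()
              .pos_weight(pos_weight)
              .reduction(torch::kSum));
    }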
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor binary_cross_entropy_with_logits( + @Const @ByRef Tensor input, + @Const @ByRef Tensor target, + @Cast("const torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions*") @ByRef(nullValue = "torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions{}") BCEWithLogitsLossOptions options); + // namespace functional + // namespace nn + // namespace torch -// Targeting ../SoftsignImplModuleHolder.java +// Parsed from ATen/PadNd.h -// Targeting ../TanhImplModuleHolder.java +// #pragma once +// #include +// #include +@Namespace("at") public enum padding_mode { + reflect(0), + replicate(1), + circular(2), + constant(3); -// Targeting ../TanhshrinkImplModuleHolder.java + public final int value; + private padding_mode(int v) { this.value = v; } + private padding_mode(padding_mode e) { this.value = e.value; } + public padding_mode intern() { for (padding_mode e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +@Namespace("at") public static native @ByVal @Cast("c10::string_view*") Pointer padding_mode_string(padding_mode m); +@Namespace("at") public static native @ByVal @Cast("c10::string_view*") Pointer padding_mode_string(@Cast("at::padding_mode") int m); -// Targeting ../ThresholdImplModuleHolder.java + // namespace at -// Targeting ../MultiheadAttentionImplModuleHolder.java +// Parsed from torch/nn/options/padding.h +// #pragma once -// Targeting ../LayerNormImplModuleHolder.java +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../ReflectionPad1dOptions.java -// Targeting ../LocalResponseNormImplModuleHolder.java +// Targeting ../ReflectionPad2dOptions.java -// Targeting ../CrossMapLRN2dImplModuleHolder.java +// Targeting ../ReflectionPad3dOptions.java -// Targeting ../GroupNormImplModuleHolder.java +/** {@code ReflectionPadOptions} specialized for the {@code ReflectionPad1d} module. + * + * Example: + *
{@code
+ *  ReflectionPad1d model(ReflectionPad1dOptions({3, 1}));
+ *  }
*/ -// Targeting ../TransformerEncoderLayerImplModuleHolder.java +/// +/** {@code ReflectionPadOptions} specialized for the {@code ReflectionPad2d} module. + * + * Example: + *
{@code
+ *  ReflectionPad2d model(ReflectionPad2dOptions({1, 1, 2, 0}));
+ *  }
*/ -// Targeting ../TransformerDecoderLayerImplModuleHolder.java +/// +/** {@code ReflectionPadOptions} specialized for the {@code ReflectionPad3d} module. + * + * Example: + *
{@code
+ *  ReflectionPad3d model(ReflectionPad3dOptions({1, 1, 2, 0, 1, 1}));
+ *  }
*/ +// Targeting ../ReplicationPad1dOptions.java -// Targeting ../TransformerEncoderImplModuleHolder.java +// Targeting ../ReplicationPad2dOptions.java -// Targeting ../TransformerDecoderImplModuleHolder.java +// Targeting ../ReplicationPad3dOptions.java -// Targeting ../TransformerImplModuleHolder.java +/** {@code ReplicationPadOptions} specialized for the {@code ReplicationPad1d} module. + * + * Example: + *
{@code
+ *  ReplicationPad1d model(ReplicationPad1dOptions({3, 1}));
+ *  }
*/ -/** Pretty prints the given {@code Module} into the {@code ostream}. */ +/// -/** Serializes a {@code ModuleHolder} into an {@code OutputArchive}. */ +/** {@code ReplicationPadOptions} specialized for the {@code ReplicationPad2d} module. + * + * Example: + *
{@code
+ *  ReplicationPad2d model(ReplicationPad2dOptions({1, 1, 2, 0}));
+ *  }
*/ -/** Deserializes a {@code ModuleHolder} from an {@code InputArchive}. */ +/// - // namespace nn - // namespace torch +/** {@code ReplicationPadOptions} specialized for the {@code ReplicationPad3d} module. + * + * Example: + *
{@code
+ *  ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2}));
+ *  }
*/ -// Workaround for CUDA 10.2 and below not allowing attribute unused on -// using declarations. -// #ifdef __CUDACC__ -// #define TORCH_UNUSED_EXCEPT_CUDA -// #else -// #define TORCH_UNUSED_EXCEPT_CUDA C10_UNUSED -// #endif +/// +// Targeting ../ZeroPad2dOptions.java -/** Defines a class {@code Name} which inherits from {@code nn::ModuleHolder} to provide a - * wrapper over a {@code std::shared_ptr}. - * {@code Impl} is a type alias for {@code ImplType} which provides a way to call static - * method of {@code ImplType}. */ -// #define TORCH_MODULE_IMPL(Name, ImplType) -// class Name : public torch::nn::ModuleHolder { /* NOLINT */ -// public: -// using torch::nn::ModuleHolder::ModuleHolder; -// using Impl TORCH_UNUSED_EXCEPT_CUDA = ImplType; -// } -/** Like {@code TORCH_MODULE_IMPL}, but defaults the {@code ImplType} name to {@code Impl}. */ -// #define TORCH_MODULE(Name) TORCH_MODULE_IMPL(Name, Name##Impl) +// Targeting ../ConstantPad1dOptions.java -// Parsed from torch/nn/utils.h +// Targeting ../ConstantPad2dOptions.java -// #pragma once -// #include -// #include -// #include +// Targeting ../ConstantPad3dOptions.java -// Parsed from torch/nn/utils/clip_grad.h -// #pragma once +/** {@code ConstantPadOptions} specialized for the {@code ConstantPad1d} module. + * + * Example: + *
{@code
+ *  ConstantPad1d model(ConstantPad1dOptions({3, 1}, 3.5));
+ *  }
*/ -// #include +/// -// #include +/** {@code ConstantPadOptions} specialized for the {@code ConstantPad2d} module. + * + * Example: + *
{@code
+ *  ConstantPad2d model(ConstantPad2dOptions({3, 0, 2, 1}, 3.5));
+ *  }
*/ -// Clips gradient norm of a vector of Tensors. -// See -// https://pytorch.org/docs/stable/nn.html?highlight=clip_grad_norm#torch.nn.utils.clip_grad_norm_ -// for more details about this module. -// -// Difference with the python version: unlike the python version, even when -// skipping the finiteness checks (error_if_nonfinite = false), this function -// will introduce a device <=> CPU synchronization (for devices where that makes -// sense!) in order to return a CPU-side `double`. This C++ version therefore -// cannot be run fully asynchronously w.r.t. the device of the gradients. -@Namespace("torch::nn::utils") public static native double clip_grad_norm_( - @Cast({"", "std::vector"}) @StdMove TensorVector parameters, - double max_norm, - double norm_type/*=2.0*/, - @Cast("bool") boolean error_if_nonfinite/*=false*/); -@Namespace("torch::nn::utils") public static native double clip_grad_norm_( - @Cast({"", "std::vector"}) @StdMove TensorVector parameters, - double max_norm); +/// -// A wrapper around clip_grad_norm_ that allows us to call the function with a -// braced-init-list of Tensors. +/** {@code ConstantPadOptions} specialized for the {@code ConstantPad3d} module. + * + * Example: + *
{@code
+ *  ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5));
+ *  }
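A worked C++ example of the shape arithmetic shared by this family of modules; the sizes and fill value are illustrative:

    #include <torch/torch.h>

    int main() {
      // Pads the last dimension by 3 on the left and 1 on the right with 3.5,
      // so an input of shape (1, 2, 4) becomes (1, 2, 3 + 4 + 1) = (1, 2, 8).
      torch::nn::ConstantPad1d pad(torch::nn::ConstantPad1dOptions({3, 1}, 3.5));
      auto y = pad(torch::zeros({1, 2, 4}));
    }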
*/ -// A wrapper around clip_grad_norm_ that allows us to call the function with a -// single Tensor. -@Namespace("torch::nn::utils") public static native double clip_grad_norm_( - @ByVal Tensor parameter, - double max_norm, - double norm_type/*=2.0*/, - @Cast("bool") boolean error_if_nonfinite/*=false*/); -@Namespace("torch::nn::utils") public static native double clip_grad_norm_( - @ByVal Tensor parameter, - double max_norm); +// ============================================================================ +// Targeting ../PadFuncOptions.java -// Clips gradient of an iterable of parameters at specified value. -// Gradients are modified in-place. -// See https://pytorch.org/docs/stable/nn.html#clip-grad-value -// for more details about this module. -@Namespace("torch::nn::utils") public static native void clip_grad_value_( - @Cast({"", "std::vector"}) @StdMove TensorVector parameters, - double clip_value); -// A wrapper around clip_grad_value_ that allows us to call the function with a -// braced-init-list of Tensors. -// A wrapper around clip_grad_value_ that allows us to call the function with a -// single Tensor. -@Namespace("torch::nn::utils") public static native void clip_grad_value_(@ByVal Tensor parameter, double clip_value); + // namespace functional - // namespace utils // namespace nn // namespace torch -// Parsed from torch/nn/utils/convert_parameters.h +// Parsed from torch/nn/functional/padding.h // #pragma once -// #include -// #include - -// This helper function is to check if the parameters are located -// in the same device. Currently, the conversion between model parameters -// and single vector form is not supported for multiple allocations, -// e.g. parameters in different GPUs, or mixture of CPU/GPU. -@Namespace("torch::nn::utils") public static native @ByVal LongOptional _check_param_device( - @Const @ByRef Tensor param, - @ByVal LongOptional old_param_device); +// #include +// #include -// Convert parameters to one vector -@Namespace("torch::nn::utils") public static native @ByVal Tensor parameters_to_vector( - @StdVector Tensor parameters); +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor pad( + @Const @ByRef Tensor input, + @ByVal LongArrayRef pad, + @ByVal PaddingMode mode, + double value); +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor pad( + @Const @ByRef Tensor input, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad, + @ByVal PaddingMode mode, + double value); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Convert one vector to the parameters -@Namespace("torch::nn::utils") public static native void vector_to_parameters( - @Const @ByRef Tensor vec, - @StdVector Tensor parameters); +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pad +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::PadFuncOptions} class to +/** learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1,
+/** 2}).mode(torch::kReplicate));
+/** }
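The documented call worked through in C++; the pad list pairs up from the last dimension backwards, and the input shape is arbitrary:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      // {1, 2, 2, 1, 1, 2} = (left, right, top, bottom, front, back), so a
      // (1, 1, 2, 3, 4) input becomes (1, 1, 2+1+2, 3+2+1, 4+1+2) = (1, 1, 5, 6, 7).
      auto input  = torch::ones({1, 1, 2, 3, 4});
      auto output = F::pad(
          input, F::PadFuncOptions({1, 2, 2, 1, 1, 2}).mode(torch::kReplicate));
    }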
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor pad(@Const @ByRef Tensor input, @Const @ByRef PadFuncOptions options); - // namespace utils + // namespace functional // namespace nn // namespace torch -// Parsed from torch/nn/utils/rnn.h +// Parsed from torch/nn/modules/utils.h // #pragma once +// #include +// #include // #include -// #include - -// #include - - -/// -/// -/// -/// -/// -/// -/// -@Namespace("torch::nn::utils::rnn") public static native @ByVal Tensor invert_permutation(@Const @ByRef Tensor permutation); -// Targeting ../PackedSequence.java - - - -/** Packs a Tensor containing padded sequences of variable length. - * - * {@code input} can be of size {@code }T x B x *{@code } where {@code T} is the length of the - * longest sequence (equal to {@code }lengths[0]{@code }), {@code }B{@code } is the batch size, and - * {@code }*{@code } is any number of dimensions (including 0). If {@code }batch_first{@code } is - * {@code }true{@code }, {@code }B x T x *{@code } {@code input} is expected. - * - * For unsorted sequences, use {@code enforce_sorted = false}. If {@code enforce_sorted} is - * {@code }true{@code }, the sequences should be sorted by length in a decreasing order, - * i.e. - * {@code }input[:,0]{@code } should be the longest sequence, and {@code }input[:,B-1]{@code } the - * shortest one. - * - * Note: - * This function accepts any input that has at least two dimensions. You - * can apply it to pack the labels, and use the output of the RNN with - * them to compute the loss directly. A Tensor can be retrieved from - * a {@code PackedSequence} object by calling its {@code }.data(){@code } function. - * - * Arguments: - * input (Tensor): padded batch of variable length sequences. - * lengths (Tensor): list of sequences lengths of each batch element. - * batch_first (bool, optional): if {@code }true{@code }, the input is expected in {@code }B - * x T x *{@code } - * format. Default: {@code }false{@code }. - * enforce_sorted (bool, optional): if {@code }true{@code }, the input is expected to - * contain sequences sorted by length in a decreasing order. If - * {@code }false{@code }, this condition is not checked. Default: {@code }true{@code }. - * - * Returns: - * a {@code PackedSequence} object */ - -/// -/// -/// -/// -/// -@Namespace("torch::nn::utils::rnn") public static native @ByVal PackedSequence pack_padded_sequence( - @ByVal Tensor input, - @ByVal Tensor lengths, - @Cast("bool") boolean batch_first/*=false*/, - @Cast("bool") boolean enforce_sorted/*=true*/); -@Namespace("torch::nn::utils::rnn") public static native @ByVal PackedSequence pack_padded_sequence( - @ByVal Tensor input, - @ByVal Tensor lengths); - -/** Pads a packed batch of variable length sequences. - * - * It is an inverse operation to {@code pack_padded_sequence}. - * - * The returned Tensor's data will be of size {@code }T x B x *{@code }, where {@code T} is the - * length of the longest sequence and {@code B} is the batch size. If {@code }batch_first{@code } - * is true, the data will be transposed into {@code }B x T x *{@code } format. - * - * Batch elements will be ordered decreasingly by their length. - * - * Arguments: - * sequence (PackedSequence): batch to pad - * batch_first (bool, optional): if {@code }true{@code }, the output will be in {@code }B x T - * x *{@code } - * format. - * padding_value (double, optional): values for padded elements. - * total_length (int64_t, optional): if specified, the output will be - * padded to - * have length {@code total_length}. 
This method will throw error - * if {@code total_length} is less than the max sequence length in - * {@code sequence}. - * - * Returns: - * Tuple of Tensor containing the padded sequence, and a Tensor - * containing the list of lengths of each sequence in the batch. */ - -/// -/// -/// -/// -/// -@Namespace("torch::nn::utils::rnn") public static native @ByVal TensorTensorTuple pad_packed_sequence( - @ByVal PackedSequence sequence, - @Cast("bool") boolean batch_first/*=false*/, - double padding_value/*=0.0*/, - @ByVal(nullValue = "c10::optional(torch::nullopt)") LongOptional total_length); -@Namespace("torch::nn::utils::rnn") public static native @ByVal TensorTensorTuple pad_packed_sequence( - @ByVal PackedSequence sequence); -/** Pad a list of variable length Tensors with {@code }padding_value{@code } - * - * {@code }pad_sequence{@code } stacks a list of Tensors along a new dimension, - * and pads them to equal length. For example, if the input is list of - * sequences with size {@code }L x *{@code } and if batch_first is false, and {@code }T x B x *{@code } - * otherwise. - * - * {@code B} is batch size. It is equal to the number of elements in {@code }sequences{@code }. - * {@code T} is length of the longest sequence. - * {@code L} is length of the sequence. - * {@code *} is any number of trailing dimensions, including none. - * - * Note: - * This function returns a Tensor of size {@code }T x B x *{@code } or {@code }B x T x *{@code } - * where {@code T} is the length of the longest sequence. This function assumes - * trailing dimensions and type of all the Tensors in sequences are same. - * - * Arguments: - * sequences (torch::ArrayRef): list of variable length sequences. - * batch_first (bool, optional): output will be in {@code }B x T x *{@code } if true, - * or in - * {@code }T x B x *{@code } otherwise - * padding_value (double, optional): value for padded elements. Default: 0. - * - * Returns: - * Tensor of size {@code }T x B x *{@code } if {@code batch_first} is {@code }false{@code }. - * Tensor of size {@code }B x T x *{@code } otherwise */ +// #include -/** Packs a list of variable length Tensors - * - * {@code }sequences{@code } should be a list of Tensors of size {@code }L x *{@code }, where {@code L} is - * the length of a sequence and {@code *} is any number of trailing dimensions, - * including zero. - * - * For unsorted sequences, use {@code enforce_sorted = false}. If {@code }enforce_sorted{@code } - * is {@code }true{@code }, the sequences should be sorted in the order of decreasing - * length. - * - * - * Arguments: - * sequences (torch::ArrayRef): A list of sequences of decreasing - * length. enforce_sorted (bool, optional): if {@code }true{@code }, checks that the - * input - * contains sequences sorted by length in a decreasing order. If - * {@code }false{@code }, this condition is not checked. Default: {@code }true{@code }. - * - * Returns: - * a {@code PackedSequence} object */ -@Namespace("torch::nn::utils::rnn") public static native @ByVal PackedSequence pack_sequence( - @ByVal TensorArrayRef sequences, - @Cast("bool") boolean enforce_sorted/*=true*/); -@Namespace("torch::nn::utils::rnn") public static native @ByVal PackedSequence pack_sequence( - @ByVal TensorArrayRef sequences); +// Reverse the order of `t` and repeat each element for `n` times. +// This can be used to translate padding arg used by Conv and Pooling modules +// to the ones used by `F::pad`. +// +// This mirrors `_reverse_repeat_tuple` in `torch/nn/modules/utils.py`. 
+@Namespace("torch::nn::modules::utils") public static native @ByVal @Cast("std::vector*") LongVector _reverse_repeat_vector( + @ByVal LongArrayRef t, + @Cast("int64_t") long n); +@Namespace("torch::nn::modules::utils") public static native @ByVal @Cast("std::vector*") LongVector _reverse_repeat_vector( + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] t, + @Cast("int64_t") long n); + +@Namespace("torch::nn::modules::utils") public static native @ByVal @Cast("std::vector*") LongVector _list_with_default( + @ByVal LongOptionalArrayRef out_size, + @ByVal LongArrayRef defaults); +@Namespace("torch::nn::modules::utils") public static native @ByVal @Cast("std::vector*") LongVector _list_with_default( + @ByVal LongOptionalArrayRef out_size, + @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... defaults); - // namespace rnn // namespace utils + // namespace modules // namespace nn // namespace torch -// Parsed from torch/nn/options.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - - -// Parsed from torch/nn/options/activation.h +// Parsed from torch/nn/options/pooling.h // #pragma once // #include // #include -// #include +// #include // #include -// Targeting ../ELUOptions.java +// Targeting ../AvgPool1dOptions.java -/** Options for {@code torch::nn::functional::elu}. - * - * See the documentation for {@code torch::nn::ELUOptions} class to learn what - * arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true));
- *  }
*/ +// Targeting ../AvgPool2dOptions.java -// Targeting ../SELUOptions.java +// Targeting ../AvgPool3dOptions.java -/** Options for {@code torch::nn::functional::selu}. - * - * See the documentation for {@code torch::nn::SELUOptions} class to learn what - * arguments are supported. + + +/** {@code AvgPoolOptions} specialized for the {@code AvgPool1d} module. * * Example: *
{@code
- *  namespace F = torch::nn::functional;
- *  F::selu(input, F::SELUFuncOptions(false));
+ *  AvgPool1d model(AvgPool1dOptions(3).stride(2));
  *  }
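A runnable C++ fragment with the output length worked out; the input shape is arbitrary:

    #include <torch/torch.h>

    int main() {
      // (N, C, L) input; kernel 3, stride 2 => L_out = (10 - 3) / 2 + 1 = 4
      torch::nn::AvgPool1d model(torch::nn::AvgPool1dOptions(3).stride(2));
      auto y = model(torch::randn({1, 4, 10}));  // y.sizes() == {1, 4, 4}
    }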
*/ -// Targeting ../GLUOptions.java - +/// -/** Options for {@code torch::nn::functional::glu}. - * - * See the documentation for {@code torch::nn::GLUOptions} class to learn what - * arguments are supported. +/** {@code AvgPoolOptions} specialized for the {@code AvgPool2d} module. * * Example: *
{@code
- *  namespace F = torch::nn::functional;
- *  F::glu(input, GLUFuncOptions(1));
+ *  AvgPool2d model(AvgPool2dOptions({3, 2}).stride({2, 2}));
  *  }
*/ -// Targeting ../GELUOptions.java - +/// -/** Options for {@code torch::nn::functional::gelu}. - * - * See the documentation for {@code torch::nn::GELUOptions} class to learn what - * arguments are supported. +/** {@code AvgPoolOptions} specialized for the {@code AvgPool3d} module. * * Example: *
{@code
- *  namespace F = torch::nn::functional;
- *  F::gelu(input, F::GELUFuncOptions().approximate("none"));
- *  }
*/ - -// Targeting ../HardshrinkOptions.java - - -/** Options for {@code torch::nn::functional::hardshrink}. + * AvgPool3d model(AvgPool3dOptions(5).stride(2)); + * }
*/ +/** Options for {@code torch::nn::functional::avg_pool1d}. * - * See the documentation for {@code torch::nn::HardshrinkOptions} class to learn what + * See the documentation for {@code torch::nn::AvgPool1dOptions} class to learn what * arguments are supported. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42));
+ *  F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
  *  }
*/ - -// Targeting ../HardtanhOptions.java - - -/** Options for {@code torch::nn::functional::hardtanh}. + // namespace functional +/** Options for {@code torch::nn::functional::avg_pool2d}. * - * See the documentation for {@code torch::nn::HardtanhOptions} class to learn what + * See the documentation for {@code torch::nn::AvgPool2dOptions} class to learn what * arguments are supported. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::hardtanh(x,
- *  F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true));
+ *  F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
  *  }
*/ - -// Targeting ../LeakyReLUOptions.java - - -/** Options for {@code torch::nn::functional::leaky_relu}. + // namespace functional +/** Options for {@code torch::nn::functional::avg_pool3d}. * - * See the documentation for {@code torch::nn::LeakyReLUOptions} class to learn what + * See the documentation for {@code torch::nn::AvgPool3dOptions} class to learn what * arguments are supported. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::leaky_relu(x,
- *  F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true));
+ *  F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2));
  *  }
*/ -// Targeting ../SoftmaxOptions.java - - - -// ============================================================================ -// Targeting ../SoftmaxFuncOptions.java - - - - -// Targeting ../SoftminOptions.java - - - -// ============================================================================ -// Targeting ../SoftminFuncOptions.java - - - - -// Targeting ../LogSoftmaxOptions.java +// Targeting ../MaxPool1dOptions.java +// Targeting ../MaxPool2dOptions.java -// ============================================================================ -// Targeting ../LogSoftmaxFuncOptions.java +// Targeting ../MaxPool3dOptions.java -// Targeting ../PReLUOptions.java +/** {@code MaxPoolOptions} specialized for the {@code MaxPool1d} module. + * + * Example: + *
{@code
+ *  MaxPool1d model(MaxPool1dOptions(3).stride(2));
+ *  }
*/ +/// -// Targeting ../ReLUOptions.java +/** {@code MaxPoolOptions} specialized for the {@code MaxPool2d} module. + * + * Example: + *
{@code
+ *  MaxPool2d model(MaxPool2dOptions({3, 2}).stride({2, 2}));
+ *  }
*/ +/// -/** Options for {@code torch::nn::functional::relu}. +/** {@code MaxPoolOptions} specialized for the {@code MaxPool3d} module. * - * See the documentation for {@code torch::nn::ReLUOptions} class to learn what - * arguments are supported. + * Example: + *
{@code
+ *  MaxPool3d model(MaxPool3dOptions(3).stride(2));
+ *  }
*/ +/** Options for {@code torch::nn::functional::max_pool1d} and + * {@code torch::nn::functional::max_pool1d_with_indices}. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::relu(x, F::ReLUFuncOptions().inplace(true));
+ *  F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2));
  *  }
*/ - -// Targeting ../ReLU6Options.java - - -/** Options for {@code torch::nn::functional::relu6}. + // namespace functional +/** Options for {@code torch::nn::functional::max_pool2d} and + * {@code torch::nn::functional::max_pool2d_with_indices}. * - * See the documentation for {@code torch::nn::ReLU6Options} class to learn what - * arguments are supported. + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2));
+ *  }
*/ + // namespace functional +/** Options for {@code torch::nn::functional::max_pool3d} and + * {@code torch::nn::functional::max_pool3d_with_indices}. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::relu6(x, F::ReLU6FuncOptions().inplace(true));
+ *  F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2));
  *  }
*/ -// Targeting ../RReLUOptions.java - - +// Targeting ../AdaptiveMaxPool1dOptions.java -// ============================================================================ -// Targeting ../RReLUFuncOptions.java +// Targeting ../AdaptiveMaxPool2dOptions.java +// Targeting ../AdaptiveMaxPool3dOptions.java -// Targeting ../CELUOptions.java -/** Options for {@code torch::nn::functional::celu}. - * - * See the documentation for {@code torch::nn::CELUOptions} class to learn what - * arguments are supported. +/** {@code AdaptiveMaxPoolOptions} specialized for the {@code AdaptiveMaxPool1d} module. * * Example: *
{@code
- *  namespace F = torch::nn::functional;
- *  F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true));
+ *  AdaptiveMaxPool1d model(AdaptiveMaxPool1dOptions(3));
  *  }
*/ -// Targeting ../SoftplusOptions.java - +/// -/** Options for {@code torch::nn::functional::softplus}. - * - * See the documentation for {@code torch::nn::SoftplusOptions} class to learn what - * arguments are supported. +/** {@code AdaptiveMaxPoolOptions} specialized for the {@code AdaptiveMaxPool2d} module. * * Example: *
{@code
- *  namespace F = torch::nn::functional;
- *  F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
+ *  AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2}));
  *  }
*/ -// Targeting ../SoftshrinkOptions.java - +/// -/** Options for {@code torch::nn::functional::softshrink}. +/** {@code AdaptiveMaxPoolOptions} specialized for the {@code AdaptiveMaxPool3d} module. * - * See the documentation for {@code torch::nn::SoftshrinkOptions} class to learn what - * arguments are supported. + * Example: + *
{@code
+ *  AdaptiveMaxPool3d model(AdaptiveMaxPool3dOptions(3));
+ *  }
*/ +/** Options for {@code torch::nn::functional::adaptive_max_pool1d} and + * {@code torch::nn::functional::adaptive_max_pool1d_with_indices} * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::softshrink(x, F::SoftshrinkFuncOptions(0.42));
+ *  F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
  *  }
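A C++ sketch of what makes adaptive pooling different: the options fix the output size, and the kernel is derived from whatever input length arrives:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      auto a = F::adaptive_max_pool1d(torch::randn({1, 3, 32}),
                                      F::AdaptiveMaxPool1dFuncOptions(3));
      auto b = F::adaptive_max_pool1d(torch::randn({1, 3, 50}),
                                      F::AdaptiveMaxPool1dFuncOptions(3));
      // a and b both have sizes {1, 3, 3} despite the different input lengths
    }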
*/ - -// Targeting ../ThresholdOptions.java - - -/** Options for {@code torch::nn::functional::threshold}. - * - * See the documentation for {@code torch::nn::ThresholdOptions} class to learn what - * arguments are supported. + // namespace functional +/** Options for {@code torch::nn::functional::adaptive_max_pool2d} and + * {@code torch::nn::functional::adaptive_max_pool2d_with_indices} * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true));
+ *  F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3));
  *  }
*/ // namespace functional +/** Options for {@code torch::nn::functional::adaptive_max_pool3d} and + * {@code torch::nn::functional::adaptive_max_pool3d_with_indices} + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3));
+ *  }
*/ -// ============================================================================ -// Targeting ../GumbelSoftmaxFuncOptions.java - - - - -// Targeting ../MultiheadAttentionOptions.java - - - -// ============================================================================ -// Targeting ../MultiheadAttentionForwardFuncOptions.java - - - - // namespace functional - - // namespace nn - // namespace torch - - -// Parsed from torch/nn/options/adaptive.h - -// #pragma once - -// #include -// #include -// #include -// Targeting ../AdaptiveLogSoftmaxWithLossOptions.java - - - - // namespace nn - // namespace torch +// Targeting ../AdaptiveAvgPool1dOptions.java -// Parsed from torch/nn/options/batchnorm.h +// Targeting ../AdaptiveAvgPool2dOptions.java -// #pragma once -// #include -// #include -// #include -// Targeting ../BatchNormOptions.java +// Targeting ../AdaptiveAvgPool3dOptions.java -/** Options for the {@code BatchNorm1d} module. +/** {@code AdaptiveAvgPoolOptions} specialized for the {@code AdaptiveAvgPool1d} module. * * Example: *
{@code
- *  BatchNorm1d
- *  model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+ *  AdaptiveAvgPool1d model(AdaptiveAvgPool1dOptions(5));
  *  }
*/ /// -/** Options for the {@code BatchNorm2d} module. +/** {@code AdaptiveAvgPoolOptions} specialized for the {@code AdaptiveAvgPool2d} module. * * Example: *
{@code
- *  BatchNorm2d
- *  model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+ *  AdaptiveAvgPool2d model(AdaptiveAvgPool2dOptions({3, 2}));
  *  }
*/ /// -/** Options for the {@code BatchNorm3d} module. +/** {@code AdaptiveAvgPoolOptions} specialized for the {@code AdaptiveAvgPool3d} module. * * Example: *
{@code
- *  BatchNorm3d
- *  model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+ *  AdaptiveAvgPool3d model(AdaptiveAvgPool3dOptions(3));
+ *  }
*/ +/** Options for {@code torch::nn::functional::adaptive_avg_pool1d}. + * + * See the documentation for {@code torch::nn::AdaptiveAvgPool1dOptions} class to + * learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3));
  *  }
*/ - -// ============================================================================ -// Targeting ../BatchNormFuncOptions.java - - - // namespace functional +/** Options for {@code torch::nn::functional::adaptive_avg_pool2d}. + * + * See the documentation for {@code torch::nn::AdaptiveAvgPool2dOptions} class to + * learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
+ *  }
*/ + // namespace functional +/** Options for {@code torch::nn::functional::adaptive_avg_pool3d}. + * + * See the documentation for {@code torch::nn::AdaptiveAvgPool3dOptions} class to + * learn what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
+ *  }
*/ - // namespace nn - // namespace torch - - -// Parsed from torch/nn/options/conv.h - -// #pragma once - -// #include -// #include -// #include -// #include -// #include -// Targeting ../DetailConv1dOptions.java - - -// Targeting ../DetailConv2dOptions.java - - -// Targeting ../DetailConv3dOptions.java - - - - -// Targeting ../Conv1dOptions.java +// Targeting ../MaxUnpool1dOptions.java -// Targeting ../Conv2dOptions.java +// Targeting ../MaxUnpool2dOptions.java -// Targeting ../Conv3dOptions.java +// Targeting ../MaxUnpool3dOptions.java -/** {@code ConvOptions} specialized for the {@code Conv1d} module. +/** {@code MaxUnpoolOptions} specialized for the {@code MaxUnpool1d} module. * * Example: *
{@code
- *  Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false));
+ *  MaxUnpool1d model(MaxUnpool1dOptions(3).stride(2).padding(1));
  *  }
*/ /// -/** {@code ConvOptions} specialized for the {@code Conv2d} module. +/** {@code MaxUnpoolOptions} specialized for the {@code MaxUnpool2d} module. * * Example: *
{@code
- *  Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false));
+ *  MaxUnpool2d model(MaxUnpool2dOptions(3).stride(2).padding(1));
  *  }
*/ /// -/** {@code ConvOptions} specialized for the {@code Conv3d} module. +/** {@code MaxUnpoolOptions} specialized for the {@code MaxUnpool3d} module. * * Example: *
{@code
- *  Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false));
+ *  MaxUnpool3d model(MaxUnpool3dOptions(3).stride(2).padding(1));
  *  }
*/ // ============================================================================ -// Targeting ../Conv1dFuncOptions.java +// Targeting ../MaxUnpool1dFuncOptions.java -// Targeting ../Conv2dFuncOptions.java +// Targeting ../MaxUnpool2dFuncOptions.java -// Targeting ../Conv3dFuncOptions.java +// Targeting ../MaxUnpool3dFuncOptions.java -/** {@code ConvFuncOptions} specialized for {@code torch::nn::functional::conv1d}. +/** {@code MaxUnpoolFuncOptions} specialized for + * {@code torch::nn::functional::max_unpool1d}. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1));
+ *  F::max_unpool1d(x, indices,
+ *  F::MaxUnpool1dFuncOptions(3).stride(2).padding(1));
  *  }
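A round-trip C++ sketch; max_unpool1d consumes the indices produced by the *_with_indices variant of the matching pooling call:

    #include <torch/torch.h>

    namespace F = torch::nn::functional;

    int main() {
      auto x = torch::randn({1, 1, 8});
      auto [pooled, indices] = F::max_pool1d_with_indices(
          x, F::MaxPool1dFuncOptions(2).stride(2));
      auto restored = F::max_unpool1d(
          pooled, indices, F::MaxUnpool1dFuncOptions(2).stride(2));
      // restored.sizes() == x.sizes(); positions that were not maxima are zero
    }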
*/ /// -/** {@code ConvFuncOptions} specialized for {@code torch::nn::functional::conv2d}. +/** {@code MaxUnpoolFuncOptions} specialized for + * {@code torch::nn::functional::max_unpool2d}. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1));
+ *  F::max_unpool2d(x, indices,
+ *  F::MaxUnpool2dFuncOptions(3).stride(2).padding(1));
  *  }
*/ /// -/** {@code ConvFuncOptions} specialized for {@code torch::nn::functional::conv3d}. +/** {@code MaxUnpoolFuncOptions} specialized for + * {@code torch::nn::functional::max_unpool3d}. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1));
+ *  F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
  *  }
*/ -// Targeting ../ConvTranspose1dOptions.java +// Targeting ../FractionalMaxPool1dOptions.java -// Targeting ../ConvTranspose2dOptions.java +// Targeting ../FractionalMaxPool2dOptions.java -// Targeting ../ConvTranspose3dOptions.java +// Targeting ../FractionalMaxPool3dOptions.java -/** {@code ConvTransposeOptions} specialized for the {@code ConvTranspose1d} module. +/** {@code FractionalMaxPoolOptions} specialized for the {@code FractionalMaxPool2d} module. * * Example: *
{@code
- *  ConvTranspose1d model(ConvTranspose1dOptions(3, 2,
- *  3).stride(1).bias(false));
+ *  FractionalMaxPool2d model(FractionalMaxPool2dOptions(5).output_size(1));
  *  }
*/ /// -/** {@code ConvTransposeOptions} specialized for the {@code ConvTranspose2d} module. +/** {@code FractionalMaxPoolOptions} specialized for the {@code FractionalMaxPool3d} module. * * Example: *
{@code
- *  ConvTranspose2d model(ConvTranspose2dOptions(3, 2,
- *  3).stride(1).bias(false));
+ *  FractionalMaxPool3d model(FractionalMaxPool3dOptions(5).output_size(1));
  *  }
*/ - -/// - -/** {@code ConvTransposeOptions} specialized for the {@code ConvTranspose3d} module. +/** Options for {@code torch::nn::functional::fractional_max_pool2d} and + * {@code torch::nn::functional::fractional_max_pool2d_with_indices} * * Example: *
{@code
- *  ConvTranspose3d model(ConvTranspose3dOptions(2, 2,
- *  2).stride(1).bias(false));
+ *  namespace F = torch::nn::functional;
+ *  F::fractional_max_pool2d(x,
+ *  F::FractionalMaxPool2dFuncOptions(3).output_size(2));
+ *  }
*/ + // namespace functional +/** Options for {@code torch::nn::functional::fractional_max_pool3d} and + * {@code torch::nn::functional::fractional_max_pool3d_with_indices} + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::fractional_max_pool3d(x,
+ *  F::FractionalMaxPool3dFuncOptions(3).output_size(2));
  *  }
*/ -// ============================================================================ -// Targeting ../ConvTranspose1dFuncOptions.java +// Targeting ../LPPool1dOptions.java -// Targeting ../ConvTranspose2dFuncOptions.java +// Targeting ../LPPool2dOptions.java -// Targeting ../ConvTranspose3dFuncOptions.java +// Targeting ../LPPool3dOptions.java -/** {@code ConvTransposeFuncOptions} specialized for - * {@code torch::nn::functional::conv_transpose1d}. +/** {@code LPPoolOptions} specialized for the {@code LPPool1d} module. * * Example: *
{@code
- *  namespace F = torch::nn::functional;
- *  F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1));
+ *  LPPool1d model(LPPool1dOptions(1, 2).stride(5).ceil_mode(true));
  *  }
*/ /// -/** {@code ConvTransposeFuncOptions} specialized for - * {@code torch::nn::functional::conv_transpose2d}. +/** {@code LPPoolOptions} specialized for the {@code LPPool2d} module. + * + * Example: + *
{@code
+ *  LPPool2d model(LPPool2dOptions(1, std::vector<int64_t>({3, 4})).stride({5,
+ *  6}).ceil_mode(true));
+ *  }
*/ +/** Options for {@code torch::nn::functional::lp_pool1d}. + * + * See the documentation for {@code torch::nn::LPPool1dOptions} class to learn what + * arguments are supported. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1));
+ *  F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
  *  }
*/ - -/// - -/** {@code ConvTransposeFuncOptions} specialized for - * {@code torch::nn::functional::conv_transpose3d}. + // namespace functional +/** Options for {@code torch::nn::functional::lp_pool2d}. + * + * See the documentation for {@code torch::nn::LPPool2dOptions} class to learn what + * arguments are supported. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1));
+ *  F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2));
  *  }
*/ - // namespace functional // namespace nn // namespace torch -// Parsed from torch/nn/options/distance.h +// Parsed from torch/nn/functional/pooling.h // #pragma once -// #include -// #include -// #include -// Targeting ../CosineSimilarityOptions.java - - -/** Options for {@code torch::nn::functional::cosine_similarity}. - * - * See the documentation for {@code torch::nn::CosineSimilarityOptions} class to - * learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::cosine_similarity(input1, input2,
- *  F::CosineSimilarityFuncOptions().dim(1));
- *  }
*/ +// #include +// #include +// #include +// #include -// Targeting ../PairwiseDistanceOptions.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor avg_pool1d( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer padding, + @Cast("bool") boolean ceil_mode, + @Cast("bool") boolean count_include_pad); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool1d +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::AvgPool1dFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor avg_pool1d( + @Const @ByRef Tensor input, + @Const @ByRef AvgPool1dOptions options); -/** Options for {@code torch::nn::functional::pairwise_distance}. - * - * See the documentation for {@code torch::nn::PairwiseDistanceOptions} class to - * learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1));
- *  }
*/ - // namespace functional +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor avg_pool2d( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding, + @Cast("bool") boolean ceil_mode, + @Cast("bool") boolean count_include_pad, + @ByVal LongOptional divisor_override); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ - // namespace nn - // namespace torch +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool2d +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::AvgPool2dFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
+/** }
 */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor avg_pool2d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef AvgPool2dOptions options);
+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor avg_pool3d(
+    @Const @ByRef Tensor input,
+    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size,
+    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer stride,
+    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer padding,
+    @Cast("bool") boolean ceil_mode,
+    @Cast("bool") boolean count_include_pad,
+    @ByVal LongOptional divisor_override);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-// Parsed from torch/nn/options/dropout.h
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool3d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::AvgPool3dFuncOptions}
+/** class to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** <pre>
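[Editor's note] For the two-dimensional variant declared above, the same sketch generalizes; the only change is that the `ExpandingArray<2>` arguments carry two elements. Assumptions as in the previous sketch.

```java
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class AvgPool2dSketch {
    public static void main(String[] args) {
        Tensor x = randn(1, 3, 8, 8);  // N x C x H x W
        // Two-element ExpandingArray<2>: kernel 3x2, stride 2x2,
        // like the C++ F::AvgPool2dFuncOptions({3, 2}).stride({2, 2}).
        AvgPool2dOptions opts = new AvgPool2dOptions(new LongPointer(new long[]{3, 2}));
        opts.stride().put(new long[]{2, 2});
        Tensor y = avg_pool2d(x, opts);
    }
}
```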
{@code
+/** namespace F = torch::nn::functional;
+/** F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2));
+/** }
 */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor avg_pool3d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef AvgPool3dOptions options);
-// #pragma once
+// ============================================================================
-// #include
-// #include
-// #include
-// Targeting ../DropoutOptions.java
+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_pool1d(
+    @Const @ByRef Tensor input,
+    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size,
+    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride,
+    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer padding,
+    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer dilation,
+    @Cast("bool") boolean ceil_mode);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool1d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::MaxPool1dFuncOptions}
+/** class to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** <pre>
{@code
+/** namespace F = torch::nn::functional;
+/** F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2));
+/** }
 */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor max_pool1d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef MaxPool1dOptions options);
+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(
+    @Const @ByRef Tensor input,
+    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size,
+    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride,
+    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer padding,
+    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer dilation,
+    @Cast("bool") boolean ceil_mode);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** Options for the {@code Dropout2d} module.
- *
- * Example:
- * <pre>
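[Editor's note] A sketch of the max-pooling counterpart just declared, under the same assumptions as the avg_pool sketches above.

```java
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class MaxPool1dSketch {
    public static void main(String[] args) {
        Tensor x = randn(1, 2, 10);
        MaxPool1dOptions opts = new MaxPool1dOptions(new LongPointer(new long[]{3}));
        opts.stride().put(2);  // in C++, stride defaults to kernel_size when left unset
        Tensor y = max_pool1d(x, opts);
    }
}
```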
{@code
- *  Dropout2d model(Dropout2dOptions().p(0.42).inplace(true));
- *  }
 */
+/** See the documentation for {@code torch::nn::functional::MaxPool1dFuncOptions}
+/** class to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** <pre>
{@code
+/** namespace F = torch::nn::functional;
+/** F::max_pool1d_with_indices(x, F::MaxPool1dFuncOptions(3).stride(2));
+/** }
 */
+@Namespace("torch::nn::functional") public static native @ByVal T_TensorTensor_T max_pool1d_with_indices(
+    @Const @ByRef Tensor input,
+    @Const @ByRef MaxPool1dOptions options);
-///
+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_pool2d(
+    @Const @ByRef Tensor input,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer dilation,
+    @Cast("bool") boolean ceil_mode);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** Options for the {@code Dropout3d} module.
- *
- * Example:
- * <pre>
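[Editor's note] The `_with_indices` variant above returns the generated pair wrapper `T_TensorTensor_T`. A minimal sketch of unpacking it, assuming the wrapper exposes the presets' usual `get0()`/`get1()` tuple accessors.

```java
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class MaxPoolWithIndicesSketch {
    public static void main(String[] args) {
        Tensor x = randn(1, 2, 10);
        MaxPool1dOptions opts = new MaxPool1dOptions(new LongPointer(new long[]{3}));
        T_TensorTensor_T out = max_pool1d_with_indices(x, opts);
        Tensor values = out.get0();   // pooled values
        Tensor indices = out.get1();  // argmax positions, reusable with max_unpool1d
    }
}
```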
{@code
- *  Dropout3d model(Dropout3dOptions().p(0.42).inplace(true));
- *  }
*/ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool2d +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::MaxPool2dFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor max_pool2d( + @Const @ByRef Tensor input, + @Const @ByRef MaxPool2dOptions options); -/// +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer dilation, + @Cast("bool") boolean ceil_mode); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** Options for the {@code AlphaDropout} module. - * - * Example: - *
{@code
- *  AlphaDropout model(AlphaDropoutOptions(0.2).inplace(true));
- *  }
*/ +/** See the documentation for {@code torch::nn::functional::MaxPool2dFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(3).stride(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal T_TensorTensor_T max_pool2d_with_indices( + @Const @ByRef Tensor input, + @Const @ByRef MaxPool2dOptions options); -/// +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_pool3d( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer stride, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer padding, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer dilation, + @Cast("bool") boolean ceil_mode); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** Options for the {@code FeatureAlphaDropout} module. - * - * Example: - *
{@code
- *  FeatureAlphaDropout model(FeatureAlphaDropoutOptions(0.2).inplace(true));
- *  }
*/ -// Targeting ../DropoutFuncOptions.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool3d +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::MaxPool3dFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor max_pool3d( + @Const @ByRef Tensor input, + @Const @ByRef MaxPool3dOptions options); +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer stride, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer padding, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer dilation, + @Cast("bool") boolean ceil_mode); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See the documentation for {@code torch::nn::functional::MaxPool3dFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::max_pool3d_with_indices(x, F::MaxPool3dFuncOptions(3).stride(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal T_TensorTensor_T max_pool3d_with_indices( + @Const @ByRef Tensor input, + @Const @ByRef MaxPool3dOptions options); -/** Options for {@code torch::nn::functional::dropout2d}. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5));
- *  }
*/ +// ============================================================================ -/// +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal T_TensorTensor_T adaptive_max_pool1d_with_indices( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); + // namespace detail -/** Options for {@code torch::nn::functional::dropout3d}. +/** See the documentation for + * {@code torch::nn::functional::AdaptiveMaxPool1dFuncOptions} class to learn what + * optional arguments are supported for this functional. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5));
+ *  F::adaptive_max_pool1d_with_indices(x, F::AdaptiveMaxPool1dFuncOptions(3));
  *  }
*/ +@Namespace("torch::nn::functional") public static native @ByVal T_TensorTensor_T adaptive_max_pool1d_with_indices( + @Const @ByRef Tensor input, + @Const @ByRef AdaptiveMaxPool1dOptions options); +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_max_pool1d( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/// -// Targeting ../AlphaDropoutFuncOptions.java - - -// Targeting ../FeatureAlphaDropoutFuncOptions.java - - - - // namespace functional - - // namespace nn - // namespace torch - - -// Parsed from torch/nn/options/embedding.h - -// #pragma once - -// #include -// #include -// #include -// #include -// Targeting ../EmbeddingOptions.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool1d +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::AdaptiveMaxPool1dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_max_pool1d( + @Const @ByRef Tensor input, + @Const @ByRef AdaptiveMaxPool1dOptions options); +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_with_indices( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../EmbeddingFromPretrainedOptions.java +/** See the documentation for +/** {@code torch::nn::functional::AdaptiveMaxPool2dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::adaptive_max_pool2d_with_indices(x, F::AdaptiveMaxPool2dFuncOptions(3));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal T_TensorTensor_T adaptive_max_pool2d_with_indices( + @Const @ByRef Tensor input, + @Const @ByRef AdaptiveMaxPool2dOptions options); +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_max_pool2d( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool2d +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::AdaptiveMaxPool2dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_max_pool2d( + @Const @ByRef Tensor input, + @Const @ByRef AdaptiveMaxPool2dOptions options); -// ============================================================================ -// Targeting ../EmbeddingFuncOptions.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_with_indices( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See the documentation for +/** {@code torch::nn::functional::AdaptiveMaxPool3dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::adaptive_max_pool3d_with_indices(x, F::AdaptiveMaxPool3dFuncOptions(3));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal T_TensorTensor_T adaptive_max_pool3d_with_indices( + @Const @ByRef Tensor input, + @Const @ByRef AdaptiveMaxPool3dOptions options); +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_max_pool3d( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ - // namespace functional +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool3d +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::AdaptiveMaxPool3dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_max_pool3d( + @Const @ByRef Tensor input, + @Const @ByRef AdaptiveMaxPool3dOptions options); // ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_avg_pool1d( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/// -// Targeting ../EmbeddingBagOptions.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool1d +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::AdaptiveAvgPool1dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_avg_pool1d( + @Const @ByRef Tensor input, + @Const @ByRef AdaptiveAvgPool1dOptions options); +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_avg_pool2d( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../EmbeddingBagFromPretrainedOptions.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool2d +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::AdaptiveAvgPool2dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
+/** }
 */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_avg_pool2d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef AdaptiveAvgPool2dOptions options);
+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_avg_pool3d(
+    @Const @ByRef Tensor input,
+    @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool3d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for
+/** {@code torch::nn::functional::AdaptiveAvgPool3dFuncOptions} class to learn what
+/** optional arguments are supported for this functional.
+/**
+/** Example:
+/** <pre>
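[Editor's note] For the adaptive variants, the target size is an `ExpandingArrayWithOptionalElem`, surfaced here through `LongOptional`. A sketch, assuming `LongOptional` can be constructed from a plain value the way the presets' optional adapters usually allow.

```java
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class AdaptiveAvgPool2dSketch {
    public static void main(String[] args) {
        Tensor x = randn(1, 3, 17, 31);
        // Target a 5x5 output regardless of the input spatial size.
        AdaptiveAvgPool2dOptions opts = new AdaptiveAvgPool2dOptions(new LongOptional(5));
        Tensor y = adaptive_avg_pool2d(x, opts);  // shape [1, 3, 5, 5]
    }
}
```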
{@code
+/** namespace F = torch::nn::functional;
+/** F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_avg_pool3d( + @Const @ByRef Tensor input, + @Const @ByRef AdaptiveAvgPool3dOptions options); // ============================================================================ -// Targeting ../EmbeddingBagFuncOptions.java - - - - // namespace functional - - // namespace nn - // namespace torch - - -// Parsed from torch/nn/options/fold.h -// #pragma once +@Namespace("torch::nn::functional") public static native @ByVal @Cast("std::vector*") LongVector _unpool_output_size( + @Const @ByRef Tensor input, + @Const @ByRef LongArrayRef kernel_size, + @Const @ByRef LongArrayRef stride, + @Const @ByRef LongArrayRef padding, + @Const @ByRef LongVectorOptional output_size); +@Namespace("torch::nn::functional") public static native @ByVal @Cast("std::vector*") LongVector _unpool_output_size( + @Const @ByRef Tensor input, + @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size, + @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, + @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, + @Const @ByRef LongVectorOptional output_size); -// #include -// #include -// #include -// #include -// Targeting ../FoldOptions.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_unpool1d( + @Const @ByRef Tensor input, + @Const @ByRef Tensor indices, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer padding, + @Const @ByRef LongVectorOptional output_size); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool1d +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::MaxUnpool1dFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::max_unpool1d(x, indices,
+/** F::MaxUnpool1dFuncOptions(3).stride(2).padding(1));
+/** }
 */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor max_unpool1d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef Tensor indices,
+    @Const @ByRef MaxUnpool1dFuncOptions options);
-/** Options for {@code torch::nn::functional::fold}.
- *
- * See the documentation for {@code torch::nn::FoldOptions} class to learn what
- * arguments are supported.
- *
- * Example:
- * <pre>
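[Editor's note] A sketch of the pool/unpool round trip through the declaration above, assuming `MaxUnpool1dFuncOptions` takes the kernel size in its constructor like its C++ counterpart; other assumptions as in the earlier sketches.

```java
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class MaxUnpool1dSketch {
    public static void main(String[] args) {
        Tensor x = randn(1, 1, 8);
        MaxPool1dOptions pool = new MaxPool1dOptions(new LongPointer(new long[]{2}));
        T_TensorTensor_T pooled = max_pool1d_with_indices(x, pool);  // length 8 -> 4
        // Invert the pooling with the recorded indices; kernel/stride must match.
        MaxUnpool1dFuncOptions unpool = new MaxUnpool1dFuncOptions(new LongPointer(new long[]{2}));
        Tensor y = max_unpool1d(pooled.get0(), pooled.get1(), unpool);  // back to length 8
    }
}
```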
{@code
- *  namespace F = torch::nn::functional;
- *  F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2}));
- *  }
*/ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_unpool2d( + @Const @ByRef Tensor input, + @Const @ByRef Tensor indices, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding, + @Const @ByRef LongVectorOptional output_size); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../UnfoldOptions.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool2d +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::MaxUnpool2dFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::max_unpool2d(x, indices,
+/** F::MaxUnpool2dFuncOptions(3).stride(2).padding(1));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor max_unpool2d( + @Const @ByRef Tensor input, + @Const @ByRef Tensor indices, + @Const @ByRef MaxUnpool2dFuncOptions options); +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_unpool3d( + @Const @ByRef Tensor input, + @Const @ByRef Tensor indices, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer stride, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer padding, + @Const @ByRef LongVectorOptional output_size); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** Options for {@code torch::nn::functional::unfold}. - * - * See the documentation for {@code torch::nn::UnfoldOptions} class to learn what - * arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2));
- *  }
*/ - // namespace functional +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool3d +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::MaxUnpool3dFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor max_unpool3d( + @Const @ByRef Tensor input, + @Const @ByRef Tensor indices, + @Const @ByRef MaxUnpool3dFuncOptions options); - // namespace nn - // namespace torch +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_with_indices( + @Const @ByRef Tensor input, + @Cast("const torch::ExpandingArray<2>*") @ByRef LongPointer kernel_size, + @Cast("const c10::optional >*") @ByRef LongExpandingArrayOptional output_size, + @Cast("const c10::optional >*") @ByRef DoubleExpandingArrayOptional output_ratio, + @Const @ByRef Tensor _random_samples); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Parsed from torch/nn/options/linear.h +/** See the documentation for +/** {@code torch::nn::functional::FractionalMaxPool2dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::fractional_max_pool2d_with_indices(x,
+/** F::FractionalMaxPool2dFuncOptions(3).output_size(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal T_TensorTensor_T fractional_max_pool2d_with_indices( + @Const @ByRef Tensor input, + @Const @ByRef FractionalMaxPool2dOptions options); -// #pragma once +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor fractional_max_pool2d( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size, + @ByVal @Cast("c10::optional >*") LongExpandingArrayOptional output_size, + @ByVal @Cast("c10::optional >*") DoubleExpandingArrayOptional output_ratio, + @Const @ByRef Tensor _random_samples); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// #include -// #include -// #include -// #include -// Targeting ../LinearOptions.java +/** See the documentation for +/** {@code torch::nn::functional::FractionalMaxPool2dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::fractional_max_pool2d(x,
+/** F::FractionalMaxPool2dFuncOptions(3).output_size(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor fractional_max_pool2d( + @Const @ByRef Tensor input, + @Const @ByRef FractionalMaxPool2dOptions options); + +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_with_indices( + @Const @ByRef Tensor input, + @Cast("const torch::ExpandingArray<3>*") @ByRef LongPointer kernel_size, + @Cast("const c10::optional >*") @ByRef LongExpandingArrayOptional output_size, + @Cast("const c10::optional >*") @ByRef DoubleExpandingArrayOptional output_ratio, + @Const @ByRef Tensor _random_samples); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See the documentation for +/** {@code torch::nn::functional::FractionalMaxPool3dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::fractional_max_pool3d_with_indices(x,
+/** F::FractionalMaxPool3dFuncOptions(3).output_size(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal T_TensorTensor_T fractional_max_pool3d_with_indices( + @Const @ByRef Tensor input, + @Const @ByRef FractionalMaxPool3dOptions options); -// Targeting ../FlattenOptions.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor fractional_max_pool3d( + @Const @ByRef Tensor input, + @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size, + @ByVal @Cast("c10::optional >*") LongExpandingArrayOptional output_size, + @ByVal @Cast("c10::optional >*") DoubleExpandingArrayOptional output_ratio, + @Const @ByRef Tensor _random_samples); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See the documentation for +/** {@code torch::nn::functional::FractionalMaxPool3dFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::fractional_max_pool3d(x,
+/** F::FractionalMaxPool3dFuncOptions(3).output_size(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor fractional_max_pool3d( + @Const @ByRef Tensor input, + @Const @ByRef FractionalMaxPool3dOptions options); -// Targeting ../UnflattenOptions.java +// ============================================================================ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor lp_pool1d( + @Const @ByRef Tensor input, + double norm_type, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size, + @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride, + @Cast("bool") boolean ceil_mode); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -// Targeting ../BilinearOptions.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool1d +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::LPPool1dFuncOptions} class +/** to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
+/** }
 */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor lp_pool1d(
+    @Const @ByRef Tensor input,
+    @Const @ByRef LPPool1dOptions options);
+// #ifndef DOXYGEN_SHOULD_SKIP_THIS
+@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor lp_pool2d(
+    @Const @ByRef Tensor input,
+    double norm_type,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size,
+    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride,
+    @Cast("bool") boolean ceil_mode);
+ // namespace detail
+// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+/** See
+/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool2d
+/** about the exact behavior of this functional.
+/**
+/** See the documentation for {@code torch::nn::functional::LPPool2dFuncOptions} class
+/** to learn what optional arguments are supported for this functional.
+/**
+/** Example:
+/** <pre>
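[Editor's note] LP pooling is the power-average pooling family (norm_type 1 is sum pooling up to a factor, norm_type 2 an RMS-like average). A sketch of the 1d call above, assuming `LPPool1dOptions` takes `(norm_type, kernel_size)` like the C++ `LPPool1dFuncOptions(2, 3)`.

```java
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class LpPool1dSketch {
    public static void main(String[] args) {
        Tensor x = randn(1, 2, 9);
        LPPool1dOptions opts = new LPPool1dOptions(2, new LongPointer(new long[]{3}));
        opts.stride().put(2);
        Tensor y = lp_pool1d(x, opts);
    }
}
```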
{@code
+/** namespace F = torch::nn::functional;
+/** F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor lp_pool2d( + @Const @ByRef Tensor input, + @Const @ByRef LPPool2dOptions options); + // namespace functional // namespace nn // namespace torch -// Parsed from torch/nn/options/loss.h +// Parsed from torch/nn/options/normalization.h // #pragma once // #include // #include -// #include // #include -// Targeting ../L1LossOptions.java - - -/** Options for {@code torch::nn::functional::l1_loss}. - * - * See the documentation for {@code torch::nn::L1LossOptions} class to learn what - * arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone));
- *  }
*/ - -// Targeting ../KLDivLossOptions.java - - -/** Options for {@code torch::nn::functional::kl_div}. - * - * See the documentation for {@code torch::nn::KLDivLossOptions} class to learn what - * arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::kl_div(input, target,
- *  F::KLDivFuncOptions().reduction(torch::kNone).log_target(false));
- *  }
*/ - -// Targeting ../MSELossOptions.java - - -/** Options for {@code torch::nn::functional::mse_loss}. - * - * See the documentation for {@code torch::nn::MSELossOptions} class to learn what - * arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone));
- *  }
*/ - -// Targeting ../BCELossOptions.java - - -/** Options for {@code torch::nn::functional::binary_cross_entropy}. - * - * See the documentation for {@code torch::nn::BCELossOptions} class to learn what - * arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::binary_cross_entropy(input, target,
- *  F::BinaryCrossEntropyFuncOptions().weight(weight));
- *  }
*/ - -// Targeting ../HingeEmbeddingLossOptions.java - - -/** Options for {@code torch::nn::functional::hinge_embedding_loss}. - * - * See the documentation for {@code torch::nn::HingeEmbeddingLossOptions} class to - * learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::hinge_embedding_loss(input, target,
- *  F::HingeEmbeddingLossFuncOptions().margin(2));
- *  }
*/ +// #include +// Targeting ../LayerNormOptions.java -// Targeting ../MultiMarginLossOptions.java -/** Options for {@code torch::nn::functional::multi_margin_loss}. - * - * See the documentation for {@code torch::nn::MultiMarginLossOptions} class to learn - * what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::multi_margin_loss(input, target,
- *  F::MultiMarginLossFuncOptions().margin(2).weight(weight));
- *  }
*/ +// ============================================================================ +// Targeting ../LayerNormFuncOptions.java -// Targeting ../CosineEmbeddingLossOptions.java -/** Options for {@code torch::nn::functional::cosine_embedding_loss}. - * - * See the documentation for {@code torch::nn::CosineEmbeddingLossOptions} class to - * learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::cosine_embedding_loss(input1, input2, target,
- *  F::CosineEmbeddingLossFuncOptions().margin(0.5));
- *  }
*/ -// Targeting ../MultiLabelMarginLossOptions.java +// Targeting ../LocalResponseNormOptions.java -/** Options for {@code torch::nn::functional::multilabel_margin_loss}. +/** Options for {@code torch::nn::functional::local_response_norm}. * - * See the documentation for {@code torch::nn::MultiLabelMarginLossOptions} class to + * See the documentation for {@code torch::nn::LocalResponseNormOptions} class to * learn what arguments are supported. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::multilabel_margin_loss(input, target,
- *  F::MultilabelMarginLossFuncOptions(torch::kNone));
- *  }
*/ - -// Targeting ../SoftMarginLossOptions.java - - -/** Options for {@code torch::nn::functional::soft_margin_loss}. - * - * See the documentation for {@code torch::nn::SoftMarginLossOptions} class to learn - * what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::soft_margin_loss(input, target,
- *  F::SoftMarginLossFuncOptions(torch::kNone));
+ *  F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
  *  }
*/ -// Targeting ../MultiLabelSoftMarginLossOptions.java - - -/** Options for {@code torch::nn::functional::multilabel_soft_margin_loss}. - * - * See the documentation for {@code torch::nn::MultiLabelSoftMarginLossOptions} class - * to learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::multilabel_soft_margin_loss(input, target,
- *  F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight));
- *  }
*/ +// Targeting ../CrossMapLRN2dOptions.java -// Targeting ../TripletMarginLossOptions.java -/** Options for {@code torch::nn::functional::triplet_margin_loss}. - * - * See the documentation for {@code torch::nn::TripletMarginLossOptions} class to - * learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::triplet_margin_loss(anchor, positive, negative,
- *  F::TripletMarginLossFuncOptions().margin(1.0));
- *  }
*/ +// ============================================================================ +// Targeting ../NormalizeFuncOptions.java -// Targeting ../TripletMarginWithDistanceLossOptions.java -/** Options for {@code torch::nn::functional::triplet_margin_with_distance_loss}. - * - * See the documentation for {@code torch::nn::TripletMarginWithDistanceLossOptions} - * class to learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::triplet_margin_with_distance_loss(anchor, positive, negative,
- *  F::TripletMarginWithDistanceLossFuncOptions().margin(1.0));
- *  }
*/ -// Targeting ../CTCLossOptions.java +// Targeting ../GroupNormOptions.java -/** Options for {@code torch::nn::functional::ctc_loss}. - * - * See the documentation for {@code torch::nn::CTCLossOptions} class to learn what - * arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
- *  F::CTCLossFuncOptions().reduction(torch::kNone));
- *  }
*/ -// Targeting ../SmoothL1LossOptions.java +// ============================================================================ +// Targeting ../GroupNormFuncOptions.java -/** Options for {@code torch::nn::functional::smooth_l1_loss}. - * - * See the documentation for {@code torch::nn::SmoothL1LossOptions} class to learn - * what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone));
- *  }
*/ -// Targeting ../HuberLossOptions.java + // namespace functional + // namespace nn + // namespace torch -/** Options for {@code torch::nn::functional::huber_loss}. - * - * See the documentation for {@code torch::nn::HuberLossOptions} class to learn what - * arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::huber_loss(input, target, F::HuberLossFuncOptions(torch::kNone));
- *  }
*/ -// Targeting ../PoissonNLLLossOptions.java +// Parsed from torch/nn/functional/normalization.h +// #pragma once -/** Options for {@code torch::nn::functional::poisson_nll_loss}. - * - * See the documentation for {@code torch::nn::PoissonNLLLossOptions} class to learn - * what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::poisson_nll_loss(input, target,
- *  F::PoissonNLLLossFuncOptions().reduction(torch::kNone));
- *  }
*/ +// #include +// #include +// #include +// #include -// Targeting ../MarginRankingLossOptions.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor normalize( + @Const @ByRef Tensor input, + double p, + @Cast("int64_t") long dim, + double eps, + @ByVal TensorOptional out); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.normalize +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::NormalizeFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1));
+/** }
 */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor normalize(
+    @Const @ByRef Tensor input,
+    @ByVal(nullValue = "torch::nn::functional::NormalizeFuncOptions{}") NormalizeFuncOptions options);
+@Namespace("torch::nn::functional") public static native @ByVal Tensor normalize(
+    @Const @ByRef Tensor input);
-/** Options for {@code torch::nn::functional::margin_ranking_loss}.
- *
- * See the documentation for {@code torch::nn::MarginRankingLossOptions} class to
- * learn what arguments are supported.
- *
- * Example:
- * <pre>
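[Editor's note] Note how the C++ default argument (`nullValue = ...`) yields a second, options-free Java overload. A sketch of both forms, assuming the options class exposes the chainable `p(...)`/`dim(...)` setters generated from TORCH_ARG accessors.

```java
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class NormalizeSketch {
    public static void main(String[] args) {
        Tensor v = randn(4, 10);
        Tensor unit = normalize(v);  // defaults: L2 norm along dim 1, as in C++
        // Explicit options, mirroring F::NormalizeFuncOptions().p(1).dim(-1):
        Tensor l1 = normalize(v, new NormalizeFuncOptions().p(1).dim(-1));
    }
}
```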
{@code
- *  namespace F = torch::nn::functional;
- *  F::margin_ranking_loss(input1, input2, target,
- *  F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
- *  }
*/ +// ============================================================================ -// Targeting ../NLLLossOptions.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor layer_norm( + @Const @ByRef Tensor input, + @Cast("const std::vector*") @ByRef LongVector normalized_shape, + @Const @ByRef Tensor weight, + @Const @ByRef Tensor bias, + double eps); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.layer_norm +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::LayerNormFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5));
+/** }
 */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor layer_norm(
+    @Const @ByRef Tensor input,
+    @Const @ByRef LayerNormFuncOptions options);
-/** Options for {@code torch::nn::functional::nll_loss}.
- *
- * See the documentation for {@code torch::nn::NLLLossOptions} class to learn what
- * arguments are supported.
- *
- * Example:
- * <pre>
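[Editor's note] A sketch of the layer_norm call above; the normalized shape is a `std::vector<int64_t>`, so the options constructor is assumed to accept the presets' `LongVector` adapter.

```java
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class LayerNormSketch {
    public static void main(String[] args) {
        Tensor x = randn(3, 2, 2);
        // Normalize over the trailing {2, 2} shape, as in the C++ example.
        LayerNormFuncOptions opts = new LayerNormFuncOptions(new LongVector(2, 2));
        Tensor y = layer_norm(x, opts);
    }
}
```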
{@code
- *  namespace F = torch::nn::functional;
- *  F::nll_loss(input, target,
- *  F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean));
- *  }
*/ +// ============================================================================ -// Targeting ../CrossEntropyLossOptions.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor local_response_norm( + @Const @ByRef Tensor input, + @Cast("int64_t") long size, + double alpha, + double beta, + double k); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.local_response_norm +/** about the exact behavior of this functional. +/** +/** See the documentation for +/** {@code torch::nn::functional::LocalResponseNormFuncOptions} class to learn what +/** optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor local_response_norm( + @Const @ByRef Tensor input, + @Cast("const torch::nn::functional::LocalResponseNormFuncOptions*") @ByRef LocalResponseNormOptions options); -/** Options for {@code torch::nn::functional::cross_entropy}. - * - * See the documentation for {@code torch::nn::CrossEntropyLossOptions} class to - * learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::cross_entropy(input, target,
- *  F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
- *  }
*/ +// ============================================================================ -// Targeting ../BCEWithLogitsLossOptions.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor group_norm( + @Const @ByRef Tensor input, + @Cast("int64_t") long num_groups, + @Const @ByRef Tensor weight, + @Const @ByRef Tensor bias, + double eps); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.group_norm +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::GroupNormFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5));
+/** }
 */
+@Namespace("torch::nn::functional") public static native @ByVal Tensor group_norm(
+    @Const @ByRef Tensor input,
+    @Const @ByRef GroupNormFuncOptions options);
-/** Options for {@code torch::nn::functional::binary_cross_entropy_with_logits}.
- *
- * See the documentation for {@code torch::nn::BCEWithLogitsLossOptions} class to
- * learn what arguments are supported.
- *
- * Example:
- * <pre>
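[Editor's note] A sketch of the group_norm call above; the channel count must be divisible by `num_groups`. Assumes a `GroupNormFuncOptions(long)` constructor and a chainable `eps(double)` setter, mirroring the C++ options.

```java
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class GroupNormSketch {
    public static void main(String[] args) {
        Tensor x = randn(2, 6, 4, 4);  // 6 channels, 3 groups of 2
        GroupNormFuncOptions opts = new GroupNormFuncOptions(3).eps(2e-5);
        Tensor y = group_norm(x, opts);
    }
}
```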
{@code
- *  namespace F = torch::nn::functional;
- *  F::binary_cross_entropy_with_logits(input, target,
- *  F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum));
- *  }
*/ // namespace functional - // namespace nn // namespace torch -// Parsed from torch/nn/options/normalization.h +// Parsed from torch/nn/options/pixelshuffle.h // #pragma once // #include // #include // #include -// #include -// Targeting ../LayerNormOptions.java - - - -// ============================================================================ -// Targeting ../LayerNormFuncOptions.java - - +// Targeting ../PixelShuffleOptions.java -// Targeting ../LocalResponseNormOptions.java +// Targeting ../PixelUnshuffleOptions.java -/** Options for {@code torch::nn::functional::local_response_norm}. +/** Options for {@code torch::nn::functional::pixel_shuffle}. * - * See the documentation for {@code torch::nn::LocalResponseNormOptions} class to - * learn what arguments are supported. + * See the documentation for {@code torch::nn::PixelShuffleOptions} class to learn + * what arguments are supported. * * Example: *
{@code
  *  namespace F = torch::nn::functional;
- *  F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
+ *  F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
  *  }
*/ -// Targeting ../CrossMapLRN2dOptions.java - - - -// ============================================================================ -// Targeting ../NormalizeFuncOptions.java +/// +/// +/** Options for {@code torch::nn::functional::pixel_unshuffle}. + * + * See the documentation for {@code torch::nn::PixelUnshuffleOptions} class to learn + * what arguments are supported. + * + * Example: + *
{@code
+ *  namespace F = torch::nn::functional;
+ *  F::pixel_unshuffle(x, F::PixelUnshuffleFuncOptions(2));
+ *  }
*/ + // namespace functional + // namespace nn + // namespace torch -// Targeting ../GroupNormOptions.java +// Parsed from torch/nn/functional/pixelshuffle.h +// #pragma once +// #include -// ============================================================================ -// Targeting ../GroupNormFuncOptions.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pixel_shuffle +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::PixelShuffleFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor pixel_shuffle( + @Const @ByRef Tensor input, + @Cast("const torch::nn::functional::PixelShuffleFuncOptions*") @ByRef PixelShuffleOptions options); +@Namespace("torch::nn::functional") public static native @ByVal Tensor pixel_unshuffle( + @Const @ByRef Tensor input, + @Cast("const torch::nn::functional::PixelUnshuffleFuncOptions*") @ByRef PixelUnshuffleOptions options); // namespace functional - // namespace nn // namespace torch -// Parsed from torch/nn/options/padding.h +// Parsed from torch/nn/options/upsampling.h // #pragma once @@ -79452,3744 +70326,2091 @@ The list of (type, depth) pairs controls the type of specializations and the num // #include // #include // #include -// Targeting ../ReflectionPad1dOptions.java - - -// Targeting ../ReflectionPad2dOptions.java - -// Targeting ../ReflectionPad3dOptions.java - - - -/** {@code ReflectionPadOptions} specialized for the {@code ReflectionPad1d} module. - * - * Example: - *
{@code
- *  ReflectionPad1d model(ReflectionPad1dOptions({3, 1}));
- *  }
*/ - -/// - -/** {@code ReflectionPadOptions} specialized for the {@code ReflectionPad2d} module. - * - * Example: - *
{@code
- *  ReflectionPad2d model(ReflectionPad2dOptions({1, 1, 2, 0}));
- *  }
*/ +// #include +// Targeting ../UpsampleOptions.java -/// -/** {@code ReflectionPadOptions} specialized for the {@code ReflectionPad3d} module. - * - * Example: - *
{@code
- *  ReflectionPad3d model(ReflectionPad3dOptions({1, 1, 2, 0, 1, 1}));
- *  }
*/ -// Targeting ../ReplicationPad1dOptions.java +// Targeting ../InterpolateFuncOptions.java -// Targeting ../ReplicationPad2dOptions.java + // namespace functional -// Targeting ../ReplicationPad3dOptions.java + // namespace nn + // namespace torch +// Parsed from torch/nn/functional/upsampling.h -/** {@code ReplicationPadOptions} specialized for the {@code ReplicationPad1d} module. - * - * Example: - *
{@code
- *  ReplicationPad1d model(ReplicationPad1dOptions({3, 1}));
- *  }
*/ +// #pragma once -/// +// #include +// #include +// #include -/** {@code ReplicationPadOptions} specialized for the {@code ReplicationPad2d} module. - * - * Example: - *
{@code
- *  ReplicationPad2d model(ReplicationPad2dOptions({1, 1, 2, 0}));
- *  }
*/ +// #include +// #include -/// +@Namespace("torch::nn::functional") public static native @ByVal @Cast("std::vector*") LongVector _interp_output_size( + @Cast("int64_t") long dim, + @ByVal @Cast("std::tuple >,c10::optional >,c10::optional >*") Pointer closed_over_args); -/** {@code ReplicationPadOptions} specialized for the {@code ReplicationPad3d} module. - * - * Example: - *
{@code
- *  ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2}));
- *  }
*/ +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor interpolate( + @Const @ByRef Tensor input, + @Const @ByRef LongVectorOptional size, + @Const @ByRef DoubleVectorOptional scale_factor, + @ByVal InterpolateMode mode, + @ByVal BoolOptional align_corners, + @ByVal BoolOptional recompute_scale_factor, + @Cast("bool") boolean antialias); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/// -// Targeting ../ZeroPad2dOptions.java +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.interpolate +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::InterpolateFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::interpolate(input,
+/** F::InterpolateFuncOptions().size({4}).mode(torch::kNearest));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor interpolate( + @Const @ByRef Tensor input, + @Const @ByRef(nullValue = "torch::nn::functional::InterpolateFuncOptions{}") InterpolateFuncOptions options); +@Namespace("torch::nn::functional") public static native @ByVal Tensor interpolate( + @Const @ByRef Tensor input); + // namespace functional + // namespace nn + // namespace torch -// Targeting ../ConstantPad1dOptions.java +// Parsed from torch/nn/options/vision.h -// Targeting ../ConstantPad2dOptions.java +// #pragma once +// #include +// #include +// #include +// #include +// Targeting ../GridSampleFuncOptions.java -// Targeting ../ConstantPad3dOptions.java + // namespace functional + // namespace nn + // namespace torch -/** {@code ConstantPadOptions} specialized for the {@code ConstantPad1d} module. - * - * Example: - *
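[Editor's note] For interpolate, either `size` or `scale_factor` must be set or the call throws at runtime, so the options-free overload alone is rarely useful. A sketch, assuming `LongVectorOptional` can wrap a `LongVector` directly as the presets' optional adapters usually do.

```java
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class InterpolateSketch {
    public static void main(String[] args) {
        Tensor x = randn(1, 1, 2, 2);
        // Upsample 2x2 -> 4x4; mode stays at the C++ default (nearest).
        InterpolateFuncOptions opts = new InterpolateFuncOptions()
                .size(new LongVectorOptional(new LongVector(4, 4)));
        Tensor y = interpolate(x, opts);
    }
}
```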
{@code
- *  ConstantPad1d model(ConstantPad1dOptions({3, 1}, 3.5));
- *  }
*/ -/// +// Parsed from torch/nn/functional/vision.h -/** {@code ConstantPadOptions} specialized for the {@code ConstantPad2d} module. - * - * Example: - *
{@code
- *  ConstantPad2d model(ConstantPad2dOptions({3, 0, 2, 1}, 3.5));
- *  }
*/ +// #pragma once -/// +// #include +// #include -/** {@code ConstantPadOptions} specialized for the {@code ConstantPad3d} module. - * - * Example: - *
{@code
- *  ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5));
- *  }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor affine_grid( + @Const @ByRef Tensor theta, + @Const @ByRef LongArrayRef size, + @Cast("bool") boolean align_corners/*=false*/); +@Namespace("torch::nn::functional") public static native @ByVal Tensor affine_grid( + @Const @ByRef Tensor theta, + @Const @ByRef LongArrayRef size); +@Namespace("torch::nn::functional") public static native @ByVal Tensor affine_grid( + @Const @ByRef Tensor theta, + @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, + @Cast("bool") boolean align_corners/*=false*/); +@Namespace("torch::nn::functional") public static native @ByVal Tensor affine_grid( + @Const @ByRef Tensor theta, + @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); // ============================================================================ -// Targeting ../PadFuncOptions.java +// #ifndef DOXYGEN_SHOULD_SKIP_THIS +@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor grid_sample( + @Const @ByRef Tensor input, + @Const @ByRef Tensor grid, + @ByVal GridSampleMode mode, + @ByVal GridSamplePaddingMode padding_mode, + @ByVal BoolOptional align_corners); + // namespace detail +// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** See +/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.grid_sample +/** about the exact behavior of this functional. +/** +/** See the documentation for {@code torch::nn::functional::GridSampleFuncOptions} +/** class to learn what optional arguments are supported for this functional. +/** +/** Example: +/**
{@code
+/** namespace F = torch::nn::functional;
+/** F::grid_sample(input, grid,
+/** F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true));
+/** }
*/ +@Namespace("torch::nn::functional") public static native @ByVal Tensor grid_sample( + @Const @ByRef Tensor input, + @Const @ByRef Tensor grid, + @Const @ByRef(nullValue = "torch::nn::functional::GridSampleFuncOptions{}") GridSampleFuncOptions options); +@Namespace("torch::nn::functional") public static native @ByVal Tensor grid_sample( + @Const @ByRef Tensor input, + @Const @ByRef Tensor grid); // namespace functional - // namespace nn // namespace torch -// Parsed from torch/nn/options/pixelshuffle.h +// Parsed from torch/nn/functional.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include + + +// Parsed from torch/nn/init.h // #pragma once -// #include // #include +// #include // #include -// Targeting ../PixelShuffleOptions.java + // namespace init + // namespace nn -// Targeting ../PixelUnshuffleOptions.java +/** Return the recommended gain value for the given nonlinearity function. */ +@Namespace("torch::nn::init") public static native double calculate_gain( + @ByVal Nonlinearity nonlinearity, + double param/*=0.01*/); +@Namespace("torch::nn::init") public static native double calculate_gain( + @ByVal Nonlinearity nonlinearity); +/** Fills the given {@code tensor} with the provided {@code value} in-place, and returns it. + * No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor constant_(@ByVal Tensor tensor, @ByVal Scalar value); -/** Options for {@code torch::nn::functional::pixel_shuffle}. - * - * See the documentation for {@code torch::nn::PixelShuffleOptions} class to learn - * what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
- *  }
*/ +/** Fills the given {@code tensor} with the Dirac delta function in-place, and returns + * it. No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor dirac_(@ByVal Tensor tensor); -/// -/// +/** Fills the given 2-dimensional {@code matrix} with an identity matrix. + * No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor eye_(@ByVal Tensor matrix); -/** Options for {@code torch::nn::functional::pixel_unshuffle}. - * - * See the documentation for {@code torch::nn::PixelUnshuffleOptions} class to learn - * what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::pixel_unshuffle(x, F::PixelUnshuffleFuncOptions(2));
- *  }
*/ - // namespace functional +/** Fills the given 2-dimensional {@code matrix} with values drawn from a normal + * distribution parameterized by {@code mean} and {@code std}. + * No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor normal_(@ByVal Tensor tensor, double mean/*=0*/, double std/*=1*/); +@Namespace("torch::nn::init") public static native @ByVal Tensor normal_(@ByVal Tensor tensor); - // namespace nn - // namespace torch +/** Fills the given {@code tensor} with ones. + * No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor ones_(@ByVal Tensor tensor); +/** Fills the input {@code Tensor} with a (semi) orthogonal matrix, as described in + * "Exact solutions to the nonlinear dynamics of learning in deep linear neural + * networks" - Saxe, A. et al. (2013). The input tensor must have at least 2 + * dimensions, and for tensors with more than 2 dimensions the trailing + * dimensions are flattened. + * No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor orthogonal_(@ByVal Tensor tensor, double gain/*=1.0*/); +@Namespace("torch::nn::init") public static native @ByVal Tensor orthogonal_(@ByVal Tensor tensor); -// Parsed from torch/nn/options/pooling.h +/** Fills the 2D input {@code Tensor} as a sparse matrix, where the + * non-zero elements will be drawn from a centered normal distribution + * with the given standard deviation {@code std}, as described in "Deep learning via + * Hessian-free optimization" - Martens, J. (2010). The {@code sparsity} is a real + * value between 0 and 1 that controls the fraction of elements in each column + * to be set to zero. + * No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor sparse_(@ByVal Tensor tensor, double sparsity, double std/*=0.01*/); +@Namespace("torch::nn::init") public static native @ByVal Tensor sparse_(@ByVal Tensor tensor, double sparsity); -// #pragma once +/** Fills the given 2-dimensional {@code matrix} with values drawn from a uniform + * distribution parameterized by {@code low} and {@code high}. + * No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor uniform_(@ByVal Tensor tensor, double low/*=0*/, double high/*=1*/); +@Namespace("torch::nn::init") public static native @ByVal Tensor uniform_(@ByVal Tensor tensor); -// #include -// #include -// #include -// #include -// Targeting ../AvgPool1dOptions.java +/** Fills the input {@code Tensor} with values according to the method + * described in "Delving deep into rectifiers: Surpassing human-level + * performance on ImageNet classification" - He, K. et al. (2015), using a + * normal distribution. Also known as He initialization. + * No gradient will be recorded for this operation. 
*/ +@Namespace("torch::nn::init") public static native @ByVal Tensor kaiming_normal_( + @ByVal Tensor tensor, + double a/*=0*/, + @ByVal(nullValue = "torch::nn::init::FanModeType(torch::kFanIn)") FanModeType mode, + @ByVal(nullValue = "torch::nn::init::NonlinearityType(torch::kLeakyReLU)") Nonlinearity nonlinearity); +@Namespace("torch::nn::init") public static native @ByVal Tensor kaiming_normal_( + @ByVal Tensor tensor); +/** Fills the input {@code Tensor} with values according to the method + * described in "Delving deep into rectifiers: Surpassing human-level + * performance on ImageNet classification" - He, K. et al. (2015), using a + * uniform distribution. Also known as He initialization. + * No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor kaiming_uniform_( + @ByVal Tensor tensor, + double a/*=0*/, + @ByVal(nullValue = "torch::nn::init::FanModeType(torch::kFanIn)") FanModeType mode, + @ByVal(nullValue = "torch::nn::init::NonlinearityType(torch::kLeakyReLU)") Nonlinearity nonlinearity); +@Namespace("torch::nn::init") public static native @ByVal Tensor kaiming_uniform_( + @ByVal Tensor tensor); -// Targeting ../AvgPool2dOptions.java +/** Fills the input {@code Tensor} with values according to the method + * described in "Understanding the difficulty of training deep feedforward + * neural networks" - Glorot, X. & Bengio, Y. (2010). Values are scaled by the + * {@code gain} parameter. No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor xavier_normal_(@ByVal Tensor tensor, double gain/*=1.0*/); +@Namespace("torch::nn::init") public static native @ByVal Tensor xavier_normal_(@ByVal Tensor tensor); +/** Fills the input {@code Tensor} with values according to the method + * described in "Understanding the difficulty of training deep feedforward + * neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform + * distribution. Values are scaled by the {@code gain} parameter + * No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor xavier_uniform_(@ByVal Tensor tensor, double gain/*=1.0*/); +@Namespace("torch::nn::init") public static native @ByVal Tensor xavier_uniform_(@ByVal Tensor tensor); -// Targeting ../AvgPool3dOptions.java +/** Fills the given {@code tensor} with zeros. + * No gradient will be recorded for this operation. */ +@Namespace("torch::nn::init") public static native @ByVal Tensor zeros_(@ByVal Tensor tensor); +@Namespace("torch::nn::init") public static native @ByVal T_LongLong_T _calculate_fan_in_and_fan_out( + @Const @ByRef Tensor tensor); + // namespace init + // namespace nn + // namespace torch -/** {@code AvgPoolOptions} specialized for the {@code AvgPool1d} module. - * - * Example: - *
{@code
- *  AvgPool1d model(AvgPool1dOptions(3).stride(2));
- *  }
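The initializers above are exposed as static methods on the presets' global {@code org.bytedeco.pytorch.global.torch} class. A minimal Java sketch, assuming the varargs factory overloads ({@code empty}, {@code randn}, ...) that the generator usually produces for {@code IntArrayRef} parameters; everything else uses the one-argument overloads declared in this section:
<pre>{@code
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

Tensor w = empty(3, 5);      // uninitialized 2-D weight matrix (assumed varargs overload)
eye_(w);                     // identity initialization
uniform_(w);                 // U(0, 1), via the declared 1-arg overload
normal_(w, 0.0, 0.02);       // N(mean = 0, std = 0.02)
kaiming_uniform_(w);         // He initialization with the default fan mode
Tensor b = zeros_(empty(5)); // zero-filled bias
}</pre>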
*/ -/// +// Parsed from torch/nn/modules/common.h -/** {@code AvgPoolOptions} specialized for the {@code AvgPool2d} module. - * - * Example: - *
{@code
- *  AvgPool2d model(AvgPool2dOptions({3, 2}).stride({2, 2}));
- *  }
*/ /// +/// +/// +/// +/// +// #pragma once -/** {@code AvgPoolOptions} specialized for the {@code AvgPool3d} module. - * - * Example: - *
{@code
- *  AvgPool3d model(AvgPool3dOptions(5).stride(2));
- *  }
*/ -/** Options for {@code torch::nn::functional::avg_pool1d}. +/** This macro enables a module with default arguments in its forward method + * to be used in a Sequential module. * - * See the documentation for {@code torch::nn::AvgPool1dOptions} class to learn what - * arguments are supported. + * Example usage: * - * Example: + * Let's say we have a module declared like this: *
{@code
- *  namespace F = torch::nn::functional;
- *  F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
- *  }
*/ - // namespace functional -/** Options for {@code torch::nn::functional::avg_pool2d}. - * - * See the documentation for {@code torch::nn::AvgPool2dOptions} class to learn what - * arguments are supported. + * struct MImpl : torch::nn::Module { + * public: + * explicit MImpl(int value_) : value(value_) {} + * torch::Tensor forward(int a, int b = 2, double c = 3.0) { + * return torch::tensor(a + b + c); + * } + * private: + * int value; + * }; + * TORCH_MODULE(M); + * }
* - * Example: + * If we try to use it in a Sequential module and run forward: *
{@code
- *  namespace F = torch::nn::functional;
- *  F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
- *  }
*/ - // namespace functional -/** Options for {@code torch::nn::functional::avg_pool3d}. + * torch::nn::Sequential seq(M(1)); + * seq->forward(1); + * }
* - * See the documentation for {@code torch::nn::AvgPool3dOptions} class to learn what - * arguments are supported. + * We will receive the following error message: + *
{@code
+ *  MImpl's forward() method expects 3 argument(s), but received 1.
+ *  If MImpl's forward() method has default arguments, please make sure
+ *  the forward() method is declared with a corresponding
+ *  `FORWARD_HAS_DEFAULT_ARGS` macro.
+ *  }
* - * Example: + * The right way to fix this error is to use the {@code FORWARD_HAS_DEFAULT_ARGS} + * macro when declaring the module: + *
{@code
+ *  struct MImpl : torch::nn::Module {
+ *   public:
+ *    explicit MImpl(int value_) : value(value_) {}
+ *    torch::Tensor forward(int a, int b = 2, double c = 3.0) {
+ *      return torch::tensor(a + b + c);
+ *    }
+ *   protected:
+ *    /*
+ *    NOTE: looking at the argument list of `forward`:
+ *    `forward(int a, int b = 2, double c = 3.0)`
+ *    we see the following default arguments:
+ *    ----------------------------------------------------------------
+ *    0-based index of default |         Default value of arg
+ *    arg in forward arg list  |  (wrapped by `torch::nn::AnyValue()`)
+ *    ----------------------------------------------------------------
+ *                1            |       torch::nn::AnyValue(2)
+ *                2            |       torch::nn::AnyValue(3.0)
+ *    ----------------------------------------------------------------
+ *    Thus we pass the following arguments to the `FORWARD_HAS_DEFAULT_ARGS`
+ *    macro:
+ *    * /
+ *    FORWARD_HAS_DEFAULT_ARGS({1, torch::nn::AnyValue(2)}, {2,
+ *    torch::nn::AnyValue(3.0)})
+ *   private:
+ *    int value;
+ *  };
+ *  TORCH_MODULE(M);
+ *  }
+ * Now, running the following would work: *
{@code
- *  namespace F = torch::nn::functional;
- *  F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2));
+ *  torch::nn::Sequential seq(M(1));
+ *  seq->forward(1);  // This correctly populates the default arguments for
+ *  `MImpl::forward`
  *  }
*/ - -// Targeting ../MaxPool1dOptions.java - - -// Targeting ../MaxPool2dOptions.java - - -// Targeting ../MaxPool3dOptions.java - +// #define FORWARD_HAS_DEFAULT_ARGS(...) +// template +// friend struct torch::nn::AnyModuleHolder; +// bool _forward_has_default_args() override { +// return true; +// } +// unsigned int _forward_num_required_args() override { +// std::pair args_info[] = {__VA_ARGS__}; +// return args_info[0].first; +// } +// std::vector _forward_populate_default_args( +// std::vector&& arguments) override { +// std::pair args_info[] = {__VA_ARGS__}; +// unsigned int num_all_args = std::rbegin(args_info)->first + 1; +// TORCH_INTERNAL_ASSERT( +// arguments.size() >= _forward_num_required_args() && +// arguments.size() <= num_all_args); +// std::vector ret = std::move(arguments); +// ret.reserve(num_all_args); +// for (auto& arg_info : args_info) { +// if (arg_info.first > ret.size() - 1) +// ret.emplace_back(std::move(arg_info.second)); +// } +// return ret; +// } -/** {@code MaxPoolOptions} specialized for the {@code MaxPool1d} module. - * - * Example: - *
{@code
- *  MaxPool1d model(MaxPool1dOptions(3).stride(2));
- *  }
*/ +// Parsed from torch/nn/modules/container/any.h -/// +// #pragma once -/** {@code MaxPoolOptions} specialized for the {@code MaxPool2d} module. - * - * Example: - *
{@code
- *  MaxPool2d model(MaxPool2dOptions({3, 2}).stride({2, 2}));
- *  }
*/ +// #include +// #include +// #include +// #include +// #include +// #include -/// +// #include +// #include +// #include -/** {@code MaxPoolOptions} specialized for the {@code MaxPool3d} module. - * - * Example: - *
{@code
- *  MaxPool3d model(MaxPool3dOptions(3).stride(2));
- *  }
*/ -/** Options for {@code torch::nn::functional::max_pool1d} and - * {@code torch::nn::functional::max_pool1d_with_indices}. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2));
- *  }
*/ - // namespace functional -/** Options for {@code torch::nn::functional::max_pool2d} and - * {@code torch::nn::functional::max_pool2d_with_indices}. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2));
- *  }
*/ - // namespace functional -/** Options for {@code torch::nn::functional::max_pool3d} and - * {@code torch::nn::functional::max_pool3d_with_indices}. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2));
- *  }
*/ +// #include -// Targeting ../AdaptiveMaxPool1dOptions.java +// #include +// #include +// #include +// #include +// #include +// Targeting ../AnyModule.java -// Targeting ../AdaptiveMaxPool2dOptions.java +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModule ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Targeting ../AdaptiveMaxPool3dOptions.java -/** {@code AdaptiveMaxPoolOptions} specialized for the {@code AdaptiveMaxPool1d} module. - * - * Example: - *
{@code
- *  AdaptiveMaxPool1d model(AdaptiveMaxPool1dOptions(3));
- *  }
*/ -/// -/** {@code AdaptiveMaxPoolOptions} specialized for the {@code AdaptiveMaxPool2d} module. - * - * Example: - *
{@code
- *  AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2}));
- *  }
*/ -/// -/** {@code AdaptiveMaxPoolOptions} specialized for the {@code AdaptiveMaxPool3d} module. - * - * Example: - *
{@code
- *  AdaptiveMaxPool3d model(AdaptiveMaxPool3dOptions(3));
- *  }
*/ -/** Options for {@code torch::nn::functional::adaptive_max_pool1d} and - * {@code torch::nn::functional::adaptive_max_pool1d_with_indices} - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
- *  }
*/ - // namespace functional -/** Options for {@code torch::nn::functional::adaptive_max_pool2d} and - * {@code torch::nn::functional::adaptive_max_pool2d_with_indices} - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3));
- *  }
*/ - // namespace functional -/** Options for {@code torch::nn::functional::adaptive_max_pool3d} and - * {@code torch::nn::functional::adaptive_max_pool3d_with_indices} - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3));
- *  }
*/ -// Targeting ../AdaptiveAvgPool1dOptions.java -// Targeting ../AdaptiveAvgPool2dOptions.java -// Targeting ../AdaptiveAvgPool3dOptions.java -/** {@code AdaptiveAvgPoolOptions} specialized for the {@code AdaptiveAvgPool1d} module. - * - * Example: - *
{@code
- *  AdaptiveAvgPool1d model(AdaptiveAvgPool1dOptions(5));
- *  }
*/ -/// -/** {@code AdaptiveAvgPoolOptions} specialized for the {@code AdaptiveAvgPool2d} module. - * - * Example: - *
{@code
- *  AdaptiveAvgPool2d model(AdaptiveAvgPool2dOptions({3, 2}));
- *  }
*/ -/// -/** {@code AdaptiveAvgPoolOptions} specialized for the {@code AdaptiveAvgPool3d} module. - * - * Example: - *
{@code
- *  AdaptiveAvgPool3d model(AdaptiveAvgPool3dOptions(3));
- *  }
*/ -/** Options for {@code torch::nn::functional::adaptive_avg_pool1d}. - * - * See the documentation for {@code torch::nn::AdaptiveAvgPool1dOptions} class to - * learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3));
- *  }
*/ - // namespace functional -/** Options for {@code torch::nn::functional::adaptive_avg_pool2d}. - * - * See the documentation for {@code torch::nn::AdaptiveAvgPool2dOptions} class to - * learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
- *  }
*/ - // namespace functional -/** Options for {@code torch::nn::functional::adaptive_avg_pool3d}. - * - * See the documentation for {@code torch::nn::AdaptiveAvgPool3dOptions} class to - * learn what arguments are supported. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
- *  }
*/ -// Targeting ../MaxUnpool1dOptions.java -// Targeting ../MaxUnpool2dOptions.java -// Targeting ../MaxUnpool3dOptions.java -/** {@code MaxUnpoolOptions} specialized for the {@code MaxUnpool1d} module. - * - * Example: - *
{@code
- *  MaxUnpool1d model(MaxUnpool1dOptions(3).stride(2).padding(1));
- *  }
*/ -/// -/** {@code MaxUnpoolOptions} specialized for the {@code MaxUnpool2d} module. - * - * Example: - *
{@code
- *  MaxUnpool2d model(MaxUnpool2dOptions(3).stride(2).padding(1));
- *  }
*/ -/// -/** {@code MaxUnpoolOptions} specialized for the {@code MaxUnpool3d} module. - * - * Example: - *
{@code
- *  MaxUnpool3d model(MaxUnpool3dOptions(3).stride(2).padding(1));
- *  }
*/ -// ============================================================================ -// Targeting ../MaxUnpool1dFuncOptions.java +// Private Methods -// Targeting ../MaxUnpool2dFuncOptions.java -// Targeting ../MaxUnpool3dFuncOptions.java -/** {@code MaxUnpoolFuncOptions} specialized for - * {@code torch::nn::functional::max_unpool1d}. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::max_unpool1d(x, indices,
- *  F::MaxUnpool1dFuncOptions(3).stride(2).padding(1));
- *  }
*/ + // namespace nn + // namespace torch -/// -/** {@code MaxUnpoolFuncOptions} specialized for - * {@code torch::nn::functional::max_unpool2d}. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::max_unpool2d(x, indices,
- *  F::MaxUnpool2dFuncOptions(3).stride(2).padding(1));
- *  }
*/ +// Parsed from torch/nn/modules/container/moduledict.h -/// +// #pragma once -/** {@code MaxUnpoolFuncOptions} specialized for - * {@code torch::nn::functional::max_unpool3d}. - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
- *  }
*/ +// #include +// #include +// #include +// #include +// Targeting ../ModuleDictImpl.java -// Targeting ../FractionalMaxPool1dOptions.java +/** A {@code ModuleHolder} subclass for {@code ModuleDictImpl}. + * See the documentation for {@code ModuleDictImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ -// Targeting ../FractionalMaxPool2dOptions.java + // namespace nn + // namespace torch -// Targeting ../FractionalMaxPool3dOptions.java +// Parsed from torch/nn/modules/container/modulelist.h +// #pragma once +// #include +// #include +// #include -/** {@code FractionalMaxPoolOptions} specialized for the {@code FractionalMaxPool2d} module. - * - * Example: - *
{@code
- *  FractionalMaxPool2d model(FractionalMaxPool2dOptions(5).output_size(1));
- *  }
*/ +// #include +// #include +// Targeting ../ModuleListImpl.java -/// -/** {@code FractionalMaxPoolOptions} specialized for the {@code FractionalMaxPool3d} module. - * - * Example: - *
{@code
- *  FractionalMaxPool3d model(FractionalMaxPool3dOptions(5).output_size(1));
- *  }
*/ -/** Options for {@code torch::nn::functional::fractional_max_pool2d} and - * {@code torch::nn::functional::fractional_max_pool2d_with_indices} - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::fractional_max_pool2d(x,
- *  F::FractionalMaxPool2dFuncOptions(3).output_size(2));
- *  }
*/ - // namespace functional -/** Options for {@code torch::nn::functional::fractional_max_pool3d} and - * {@code torch::nn::functional::fractional_max_pool3d_with_indices} - * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::fractional_max_pool3d(x,
- *  F::FractionalMaxPool3dFuncOptions(3).output_size(2));
- *  }
*/ -// Targeting ../LPPool1dOptions.java +/** A {@code ModuleHolder} subclass for {@code ModuleListImpl}. + * See the documentation for {@code ModuleListImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ + // namespace nn + // namespace torch -// Targeting ../LPPool2dOptions.java +// Parsed from torch/nn/modules/container/named_any.h -// Targeting ../LPPool3dOptions.java +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -/** {@code LPPoolOptions} specialized for the {@code LPPool1d} module. - * - * Example: - *
{@code
- *  LPPool1d model(LPPool1dOptions(1, 2).stride(5).ceil_mode(true));
- *  }
*/ +// #include -/// +// #include +// #include +// #include +// #include +// #include +// #include -/** {@code LPPoolOptions} specialized for the {@code LPPool2d} module. - * - * Example: - *
{@code
- *  LPPool2d model(LPPool2dOptions(1, std::vector<int64_t>({3, 4})).stride({5,
- *  6}).ceil_mode(true));
- *  }
*/ -/** Options for {@code torch::nn::functional::lp_pool1d}. - * - * See the documentation for {@code torch::nn::LPPool1dOptions} class to learn what - * arguments are supported. +/** Stores a type erased {@code Module} with name. * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
- *  }
*/ - // namespace functional -/** Options for {@code torch::nn::functional::lp_pool2d}. + * The {@code NamedAnyModule} class enables the following API for constructing + * {@code nn::Sequential} with named submodules: + * \rst + * .. code-block:: cpp * - * See the documentation for {@code torch::nn::LPPool2dOptions} class to learn what - * arguments are supported. + * struct M : torch::nn::Module { + * explicit M(int value_) : value(value_) {} + * int value; + * int forward() { + * return value; + * } + * }; * - * Example: - *
{@code
- *  namespace F = torch::nn::functional;
- *  F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2));
- *  }
*/ - // namespace functional + * Sequential sequential({ + * {"m1", std::make_shared(1)}, // shared pointer to {@code Module} is + * supported {std::string("m2"), M(2)}, // {@code Module} is supported + * {"linear1", Linear(10, 3)} // {@code ModuleHolder} is supported + * }); + * \endrst */ // namespace nn // namespace torch -// Parsed from torch/nn/options/rnn.h +// Parsed from torch/nn/modules/container/parameterdict.h // #pragma once -// #include -// #include -// #include -// #include -// Targeting ../RNNOptionsBase.java - - +// #include +// #include +// #include +// #include +// #include +// Targeting ../ParameterDictImpl.java -// Targeting ../RNNOptions.java + // namespace nn + // namespace torch -// Targeting ../LSTMOptions.java +// Parsed from torch/nn/modules/container/parameterlist.h -// Targeting ../GRUOptions.java +// #pragma once +// #include +// #include -// Targeting ../RNNCellOptionsBase.java +// #include +// Targeting ../ParameterListImpl.java + // namespace nn + // namespace torch -// Targeting ../RNNCellOptions.java +// Parsed from torch/nn/modules/container/sequential.h +// #pragma once -// Targeting ../LSTMCellOptions.java +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Targeting ../GRUCellOptions.java +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../SequentialImpl.java +/** A {@code ModuleHolder} subclass for {@code SequentialImpl}. + * See the documentation for {@code SequentialImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ // namespace nn // namespace torch -// Parsed from torch/nn/options/upsampling.h +// Parsed from torch/nn/modules/linear.h // #pragma once -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include +// #include +// #include // #include +// #include // #include -// Targeting ../UpsampleOptions.java +// Targeting ../IdentityImpl.java -// Targeting ../InterpolateFuncOptions.java +/** A {@code ModuleHolder} subclass for {@code IdentityImpl}. + * See the documentation for {@code IdentityImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../LinearImpl.java - // namespace functional - // namespace nn - // namespace torch +/** A {@code ModuleHolder} subclass for {@code LinearImpl}. + * See the documentation for {@code LinearImpl} class to learn what methods it + * provides, and examples of how to use {@code Linear} with + * {@code torch::nn::LinearOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../FlattenImpl.java -// Parsed from torch/nn/options/vision.h -// #pragma once +/** A {@code ModuleHolder} subclass for {@code FlattenImpl}. + * See the documentation for {@code FlattenImpl} class to learn what methods it + * provides, and examples of how to use {@code Flatten} with + * {@code torch::nn::FlattenOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../UnflattenImpl.java + -// #include -// #include -// #include -// #include -// Targeting ../GridSampleFuncOptions.java +/** A {@code ModuleHolder} subclass for {@code UnflattenImpl}. 
+ * See the documentation for {@code UnflattenImpl} class to learn what methods it + * provides, and examples of how to use {@code Unflatten} with + * {@code torch::nn::UnflattenOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../BilinearImpl.java - // namespace functional + +/** A {@code ModuleHolder} subclass for {@code BilinearImpl}. + * See the documentation for {@code BilinearImpl} class to learn what methods it + * provides, and examples of how to use {@code Bilinear} with + * {@code torch::nn::BilinearOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ + // namespace nn // namespace torch -// Parsed from torch/nn/options/instancenorm.h +// Parsed from torch/nn/modules/activation.h // #pragma once -// #include +// #include +// #include +// #include +// #include +// #include + // #include -// #include -// #include -// Targeting ../InstanceNormOptions.java +// Targeting ../ELUImpl.java -/** Options for the {@code InstanceNorm1d} module. - * - * Example: - *
{@code
- *  InstanceNorm1d
- *  model(InstanceNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
- *  }
*/ +/** A {@code ModuleHolder} subclass for {@code ELUImpl}. + * See the documentation for {@code ELUImpl} class to learn what methods it + * provides, and examples of how to use {@code ELU} with {@code torch::nn::ELUOptions}. + * See the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../SELUImpl.java -/// -/** Options for the {@code InstanceNorm2d} module. - * - * Example: - *
{@code
- *  InstanceNorm2d
- *  model(InstanceNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
- *  }
*/ -/// +/** A {@code ModuleHolder} subclass for {@code SELUImpl}. + * See the documentation for {@code SELUImpl} class to learn what methods it + * provides, and examples of how to use {@code SELU} with {@code torch::nn::SELUOptions}. + * See the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../HardshrinkImpl.java -/** Options for the {@code InstanceNorm3d} module. - * - * Example: - *
{@code
- *  InstanceNorm3d
- *  model(InstanceNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
- *  }
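These option builders keep their fluent setters in Java. A hedged sketch, assuming the {@code InstanceNorm1dOptions} alias maps onto the generated {@code InstanceNormOptions} class targeted above, and that the module constructor accepts the options object:
<pre>{@code
// Setter names follow the C++ TORCH_ARG fields; assumed generated signatures.
InstanceNormOptions opts = new InstanceNormOptions(4)
    .eps(0.5)
    .momentum(0.1)
    .affine(false)
    .track_running_stats(true);
InstanceNorm1dImpl model = new InstanceNorm1dImpl(opts);
}</pre>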
*/ -// Targeting ../InstanceNormFuncOptions.java +/** A {@code ModuleHolder} subclass for {@code HardshrinkImpl}. + * See the documentation for {@code HardshrinkImpl} class to learn what methods it + * provides, and examples of how to use {@code Hardshrink} with + * {@code torch::nn::HardshrinkOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../HardtanhImpl.java - // namespace functional - // namespace nn - // namespace torch +/** A {@code ModuleHolder} subclass for {@code HardtanhImpl}. + * See the documentation for {@code HardtanhImpl} class to learn what methods it + * provides, and examples of how to use {@code Hardtanh} with + * {@code torch::nn::HardtanhOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../LeakyReLUImpl.java -// Parsed from torch/nn/options/transformerlayer.h -// #pragma once -// #include -// #include -// #include -// #include +/** A {@code ModuleHolder} subclass for {@code LeakyReLUImpl}. + * See the documentation for {@code LeakyReLUImpl} class to learn what methods it + * provides, and examples of how to use {@code LeakyReLU} with + * {@code torch::nn::LeakyReLUOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../LogSigmoidImpl.java -/// -// Targeting ../TransformerEncoderLayerOptions.java +/** A {@code ModuleHolder} subclass for {@code LogSigmoidImpl}. + * See the documentation for {@code LogSigmoidImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../SoftmaxImpl.java -// Targeting ../TransformerDecoderLayerOptions.java +/** A {@code ModuleHolder} subclass for {@code SoftmaxImpl}. + * See the documentation for {@code SoftmaxImpl} class to learn what methods it + * provides, and examples of how to use {@code Softmax} with + * {@code torch::nn::SoftmaxOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../SoftminImpl.java - // namespace nn - // namespace torch -// Parsed from torch/nn/options/transformercoder.h +/** A {@code ModuleHolder} subclass for {@code SoftminImpl}. + * See the documentation for {@code SoftminImpl} class to learn what methods it + * provides, and examples of how to use {@code Softmin} with + * {@code torch::nn::SoftminOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../LogSoftmaxImpl.java -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../TransformerEncoderOptions.java +/** A {@code ModuleHolder} subclass for {@code LogSoftmaxImpl}. + * See the documentation for {@code LogSoftmaxImpl} class to learn what methods it + * provides, and examples of how to use {@code LogSoftmax} with + * {@code torch::nn::LogSoftmaxOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../Softmax2dImpl.java -// Targeting ../TransformerDecoderOptions.java +/** A {@code ModuleHolder} subclass for {@code Softmax2dImpl}. + * See the documentation for {@code Softmax2dImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. 
*/ +// Targeting ../PReLUImpl.java - // namespace nn - // namespace torch +/** A {@code ModuleHolder} subclass for {@code PReLUImpl}. + * See the documentation for {@code PReLUImpl} class to learn what methods it + * provides, and examples of how to use {@code PReLU} with {@code torch::nn::PReLUOptions}. + * See the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../ReLUImpl.java -// Parsed from torch/nn/options/transformer.h -// #pragma once -// #include -// #include -// #include -// #include +/** A {@code ModuleHolder} subclass for {@code ReLUImpl}. + * See the documentation for {@code ReLUImpl} class to learn what methods it + * provides, and examples of how to use {@code ReLU} with {@code torch::nn::ReLUOptions}. + * See the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../ReLU6Impl.java -// #include -// #include -// Targeting ../TransformerOptions.java +/** A {@code ModuleHolder} subclass for {@code ReLU6Impl}. + * See the documentation for {@code ReLU6Impl} class to learn what methods it + * provides, and examples of how to use {@code ReLU6} with {@code torch::nn::ReLU6Options}. + * See the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../RReLUImpl.java - // namespace nn - // namespace torch -// Parsed from torch/nn/functional.h +/** A {@code ModuleHolder} subclass for {@code RReLUImpl}. + * See the documentation for {@code RReLUImpl} class to learn what methods it + * provides, and examples of how to use {@code RReLU} with {@code torch::nn::RReLUOptions}. + * See the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../CELUImpl.java -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +/** A {@code ModuleHolder} subclass for {@code CELUImpl}. + * See the documentation for {@code CELUImpl} class to learn what methods it + * provides, and examples of how to use {@code CELU} with {@code torch::nn::CELUOptions}. + * See the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../GLUImpl.java -// Parsed from torch/nn/functional/activation.h -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include +/** A {@code ModuleHolder} subclass for {@code GLUImpl}. + * See the documentation for {@code GLUImpl} class to learn what methods it + * provides, and examples of how to use {@code GLU} with {@code torch::nn::GLUOptions}. + * See the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../GELUImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor elu(@ByVal Tensor input, double alpha, @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.elu -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::ELUFuncOptions} class to -/** learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true));
-/** }
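Both the options overload and the detail overload are reachable from Java through the same global class; a minimal sketch, assuming the {@code ELUOptions} builder methods follow the usual TORCH_ARG setter pattern (the same shape applies to selu, leaky_relu, celu, and the other activations in this file):
<pre>{@code
Tensor x = randn(2, 3);                                  // assumed varargs factory
Tensor y1 = elu(x, new ELUOptions().alpha(0.42).inplace(true));
Tensor y2 = elu(randn(2, 3), 1.0, false);                // detail overload: alpha, inplace
}</pre>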
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor elu(@ByVal Tensor input, @Cast("const torch::nn::functional::ELUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::ELUFuncOptions{}") ELUOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code GELUImpl}. + * See the documentation for {@code GELUImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../SiLUImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor selu(@ByVal Tensor input, @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.selu -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::SELUFuncOptions} class to -/** learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::selu(input, F::SELUFuncOptions(false));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor selu(@ByVal Tensor input, @Cast("const torch::nn::functional::SELUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SELUFuncOptions{}") SELUOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code SiLUImpl}. + * See the documentation for {@code SiLUImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../MishImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor hardshrink(@Const @ByRef Tensor input, double lambda); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardshrink -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::HardshrinkFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor hardshrink( - @Const @ByRef Tensor input, - @Cast("const torch::nn::functional::HardshrinkFuncOptions*") @ByRef(nullValue = "torch::nn::functional::HardshrinkFuncOptions{}") HardshrinkOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code MishImpl}. + * See the documentation for {@code MishImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../SigmoidImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor hardtanh( - @ByVal Tensor input, - double min_val, - double max_val, - @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hardtanh -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::HardtanhFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::hardtanh(x,
-/** F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true));
-/** }
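The detail overload spelled out here takes the clipping bounds directly, which avoids building an options object; a sketch under the same assumptions as the earlier snippets:
<pre>{@code
Tensor clipped = hardtanh(randn(4), -1.0, 1.0, false); // min_val, max_val, inplace
}</pre>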
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor hardtanh(@ByVal Tensor input, @Cast("const torch::nn::functional::HardtanhFuncOptions*") @ByRef(nullValue = "torch::nn::functional::HardtanhFuncOptions{}") HardtanhOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code SigmoidImpl}. + * See the documentation for {@code SigmoidImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../SoftplusImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor leaky_relu(@ByVal Tensor input, double negative_slope, @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.leaky_relu -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::LeakyReLUFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::leaky_relu(x,
-/** F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor leaky_relu( - @ByVal Tensor input, - @Cast("const torch::nn::functional::LeakyReLUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::LeakyReLUFuncOptions{}") LeakyReLUOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code SoftplusImpl}. + * See the documentation for {@code SoftplusImpl} class to learn what methods it + * provides, and examples of how to use {@code Softplus} with + * {@code torch::nn::SoftplusOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../SoftshrinkImpl.java -@Namespace("torch::nn::functional") public static native @ByVal Tensor logsigmoid(@Const @ByRef Tensor input); -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor gumbel_softmax( - @Const @ByRef Tensor logits, - double tau, - @Cast("bool") boolean hard, - int dim); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** A {@code ModuleHolder} subclass for {@code SoftshrinkImpl}. + * See the documentation for {@code SoftshrinkImpl} class to learn what methods it + * provides, and examples of how to use {@code Softshrink} with + * {@code torch::nn::SoftshrinkOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../SoftsignImpl.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.gumbel_softmax -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::GumbelSoftmaxFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1));
-/** }
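Both the full and the logits-only overloads are declared for Java below; a sketch with the assumed TORCH_ARG-style setters:
<pre>{@code
Tensor logits = randn(8, 10);                // assumed varargs factory
// hard one-hot samples along the last dimension
Tensor hard = gumbel_softmax(logits,
    new GumbelSoftmaxFuncOptions().hard(true).dim(-1));
Tensor soft = gumbel_softmax(logits);        // defaults: tau = 1, soft samples
}</pre>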
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor gumbel_softmax( - @Const @ByRef Tensor logits, - @Const @ByRef(nullValue = "torch::nn::functional::GumbelSoftmaxFuncOptions{}") GumbelSoftmaxFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor gumbel_softmax( - @Const @ByRef Tensor logits); -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** A {@code ModuleHolder} subclass for {@code SoftsignImpl}. + * See the documentation for {@code SoftsignImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../TanhImpl.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmax -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::SoftmaxFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::softmax(input, F::SoftmaxFuncOptions(1));
-/** }
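{@code SoftmaxFuncOptions} takes the reduction dimension in its constructor, so the Java call mirrors the C++ example directly:
<pre>{@code
Tensor probs = softmax(randn(2, 5), new SoftmaxFuncOptions(1)); // normalize along dim 1
}</pre>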
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor softmax(@Const @ByRef Tensor input, @Const @ByRef SoftmaxFuncOptions options); -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor softmin( - @Const @ByRef Tensor input, - @Cast("int64_t") long dim, - @ByVal ScalarTypeOptional dtype); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** A {@code ModuleHolder} subclass for {@code TanhImpl}. + * See the documentation for {@code TanhImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../TanhshrinkImpl.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softmin -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::SoftminFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::softmin(input, F::SoftminFuncOptions(1));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor softmin(@Const @ByRef Tensor input, @Const @ByRef SoftminFuncOptions options); -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** A {@code ModuleHolder} subclass for {@code TanhshrinkImpl}. + * See the documentation for {@code TanhshrinkImpl} class to learn what methods it + * provides, or the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../ThresholdImpl.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.log_softmax -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::LogSoftmaxFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::log_softmax(input, F::LogSoftmaxFuncOptions(1));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor log_softmax( - @Const @ByRef Tensor input, - @Const @ByRef LogSoftmaxFuncOptions options); -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** A {@code ModuleHolder} subclass for {@code ThresholdImpl}. + * See the documentation for {@code ThresholdImpl} class to learn what methods it + * provides, and examples of how to use {@code Threshold} with + * {@code torch::nn::ThresholdOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../MultiheadAttentionImpl.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.glu -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::GLUFuncOptions} class to -/** learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::glu(input, F::GLUFuncOptions(1));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor glu(@Const @ByRef Tensor input, @Cast("const torch::nn::functional::GLUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::GLUFuncOptions{}") GLUOptions options); -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor gelu(@Const @ByRef Tensor input, @StdString BytePointer approximate); -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor gelu(@Const @ByRef Tensor input, @StdString String approximate); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** A {@code ModuleHolder} subclass for {@code MultiheadAttentionImpl}. + * See the documentation for {@code MultiheadAttentionImpl} class to learn what + * methods it provides, and examples of how to use {@code MultiheadAttention} with + * {@code torch::nn::MultiheadAttentionOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -@Namespace("torch::nn::functional") public static native @ByVal Tensor gelu(@Const @ByRef Tensor input, @Cast("const torch::nn::functional::GELUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::GELUFuncOptions{}") GELUOptions options); + // namespace nn + // namespace torch -// ============================================================================ -// ============================================================================ +// Parsed from torch/nn/options/adaptive.h -// ============================================================================ +// #pragma once -// ============================================================================ +// #include +// #include +// #include +// Targeting ../AdaptiveLogSoftmaxWithLossOptions.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor relu(@ByVal Tensor input, @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::ReLUFuncOptions} class to -/** learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::relu(x, F::ReLUFuncOptions().inplace(true));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor relu(@ByVal Tensor input, @Cast("const torch::nn::functional::ReLUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::ReLUFuncOptions{}") ReLUOptions options); -// ============================================================================ + // namespace nn + // namespace torch -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor relu6(@ByVal Tensor input, @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.relu6 -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::ReLU6FuncOptions} class to -/** learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::relu6(x, F::ReLU6FuncOptions().inplace(true));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor relu6(@ByVal Tensor input, @Cast("const torch::nn::functional::ReLU6FuncOptions*") @ByRef(nullValue = "torch::nn::functional::ReLU6FuncOptions{}") ReLU6Options options); +// Parsed from torch/nn/modules/adaptive.h -// ============================================================================ +// #pragma once -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor rrelu( - @ByVal Tensor input, - double lower, - double upper, - @Cast("bool") boolean training, - @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../ASMoutput.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.rrelu -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::RReLUFuncOptions} class to -/** learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor rrelu(@ByVal Tensor input, @Const @ByRef(nullValue = "torch::nn::functional::RReLUFuncOptions{}") RReLUFuncOptions options); -// ============================================================================ +// Targeting ../AdaptiveLogSoftmaxWithLossImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor celu(@ByVal Tensor input, double alpha, @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.celu -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::CELUFuncOptions} class to -/** learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor celu(@ByVal Tensor input, @Cast("const torch::nn::functional::CELUFuncOptions*") @ByRef(nullValue = "torch::nn::functional::CELUFuncOptions{}") CELUOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code AdaptiveLogSoftmaxWithLossImpl}. + * See the documentation for {@code AdaptiveLogSoftmaxWithLossImpl} class to learn + * what methods it provides, and examples of how to use + * {@code AdaptiveLogSoftmaxWithLoss} with + * {@code torch::nn::AdaptiveLogSoftmaxWithLossOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor softplus(@Const @ByRef Tensor input, double beta, double threshold); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ + // namespace nn + // namespace torch -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softplus -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::SoftplusFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor softplus( - @Const @ByRef Tensor input, - @Cast("const torch::nn::functional::SoftplusFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SoftplusFuncOptions{}") SoftplusOptions options); -// ============================================================================ +// Parsed from torch/nn/modules/batchnorm.h -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor softshrink(@Const @ByRef Tensor input, double lambda); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// #pragma once -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.softshrink -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::SoftshrinkFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::softshrink(x, F::SoftshrinkFuncOptions(0.42));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor softshrink( - @Const @ByRef Tensor input, - @Cast("const torch::nn::functional::SoftshrinkFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SoftshrinkFuncOptions{}") SoftshrinkOptions options); +// #include +// #include +// #include +// #include +// #include +// #include -// ============================================================================ +// #include +// Targeting ../BatchNorm1dImplBaseBase.java -@Namespace("torch::nn::functional") public static native @ByVal Tensor softsign(@Const @ByRef Tensor input); -// ============================================================================ +// Targeting ../InstanceNorm1dImplBaseBase.java -@Namespace("torch::nn::functional") public static native @ByVal Tensor tanhshrink(@Const @ByRef Tensor input); -// ============================================================================ +// Targeting ../BatchNorm2dImplBaseBase.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor threshold( - @ByVal Tensor input, - double threshold, - double value, - @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.threshold -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::ThresholdFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true));
-/** }
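{@code ThresholdOptions} has no defaulted fields, so both values go through the constructor; sketch:
<pre>{@code
// elements <= 0.5 are replaced by 0.5 (threshold, value), out of place
Tensor t = threshold(randn(6), new ThresholdOptions(0.5, 0.5));
}</pre>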
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor threshold(@ByVal Tensor input, @Cast("const torch::nn::functional::ThresholdFuncOptions*") @ByRef ThresholdOptions options); +// Targeting ../InstanceNorm2dImplBaseBase.java -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal TensorTensorTuple multi_head_attention_forward( - @Const @ByRef Tensor query, - @Const @ByRef Tensor key, - @Const @ByRef Tensor value, - @Cast("int64_t") long embed_dim_to_check, - @Cast("int64_t") long num_heads, - @Const @ByRef Tensor in_proj_weight, - @Const @ByRef Tensor in_proj_bias, - @Const @ByRef Tensor bias_k, - @Const @ByRef Tensor bias_v, - @Cast("bool") boolean add_zero_attn, - double dropout_p, - @Const @ByRef Tensor out_proj_weight, - @Const @ByRef Tensor out_proj_bias, - @Cast("bool") boolean training/*=true*/, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor key_padding_mask, - @Cast("bool") boolean need_weights/*=true*/, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor attn_mask, - @Cast("bool") boolean use_separate_proj_weight/*=false*/, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor q_proj_weight, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor k_proj_weight, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor v_proj_weight, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor static_k, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor static_v, - @Cast("bool") boolean average_attn_weights/*=true*/); -@Namespace("torch::nn::functional::detail") public static native @ByVal TensorTensorTuple multi_head_attention_forward( - @Const @ByRef Tensor query, - @Const @ByRef Tensor key, - @Const @ByRef Tensor value, - @Cast("int64_t") long embed_dim_to_check, - @Cast("int64_t") long num_heads, - @Const @ByRef Tensor in_proj_weight, - @Const @ByRef Tensor in_proj_bias, - @Const @ByRef Tensor bias_k, - @Const @ByRef Tensor bias_v, - @Cast("bool") boolean add_zero_attn, - double dropout_p, - @Const @ByRef Tensor out_proj_weight, - @Const @ByRef Tensor out_proj_bias); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// Targeting ../BatchNorm3dImplBaseBase.java -@Namespace("torch::nn::functional") public static native @ByVal TensorTensorTuple multi_head_attention_forward( - @Const @ByRef Tensor query, - @Const @ByRef Tensor key, - @Const @ByRef Tensor value, - @Const @ByRef MultiheadAttentionForwardFuncOptions options); - // namespace functional - // namespace nn - // namespace torch +// Targeting ../InstanceNorm3dImplBaseBase.java -// Parsed from torch/nn/functional/batchnorm.h +// Targeting ../BatchNorm1dImplBase.java -// #pragma once -// #include -// #include -// #include +// Targeting ../BatchNorm2dImplBase.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor batch_norm( - @Const @ByRef Tensor input, - @Const @ByRef Tensor running_mean, - @Const @ByRef Tensor running_var, - @ByVal Tensor weight, - @ByVal Tensor bias, - @Cast("bool") boolean training, - @ByVal DoubleOptional momentum, - double eps); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.batch_norm -/** about the exact behavior of this functional. 
-/**
-/** See the documentation for {@code torch::nn::functional::BatchNormFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::batch_norm(input, mean, variance,
-/** F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false));
-/** }
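The Java counterpart (a fragment, with the same imports and main scaffolding as the threshold sketch above) can use the three-argument batch_norm overload visible in this hunk; the zeros/ones factories are assumed from the generated global class.

    Tensor input = randn(4, 5);  // (N, C)
    Tensor mean  = zeros(5);     // running mean per channel
    Tensor var   = ones(5);      // running variance per channel
    Tensor out   = batch_norm(input, mean, var);  // default options: inference-mode normalization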
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor batch_norm( - @Const @ByRef Tensor input, - @Const @ByRef Tensor running_mean, - @Const @ByRef Tensor running_var, - @Const @ByRef(nullValue = "torch::nn::functional::BatchNormFuncOptions{}") BatchNormFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor batch_norm( - @Const @ByRef Tensor input, - @Const @ByRef Tensor running_mean, - @Const @ByRef Tensor running_var); +// Targeting ../BatchNorm3dImplBase.java + + +// Targeting ../BatchNorm1dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code BatchNorm1dImpl}. + * See the documentation for {@code BatchNorm1dImpl} class to learn what methods it + * provides, and examples of how to use {@code BatchNorm1d} with + * {@code torch::nn::BatchNorm1dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../BatchNorm2dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code BatchNorm2dImpl}. + * See the documentation for {@code BatchNorm2dImpl} class to learn what methods it + * provides, and examples of how to use {@code BatchNorm2d} with + * {@code torch::nn::BatchNorm2dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../BatchNorm3dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code BatchNorm3dImpl}. + * See the documentation for {@code BatchNorm3dImpl} class to learn what methods it + * provides, and examples of how to use {@code BatchNorm3d} with + * {@code torch::nn::BatchNorm3dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ - // namespace functional // namespace nn // namespace torch -// Parsed from torch/nn/functional/conv.h +// Parsed from torch/nn/modules/conv.h // #pragma once +// #include +// #include + +// #include +// #include +// #include +// #include +// #include // #include +// #include // #include -// #ifndef DOXYGEN_SHOULD_SKIP_THIS +// #include -@Namespace("torch::nn::functional::detail") public static native @StdString BytePointer padding_unwrap(@ByVal kValid arg0); +// #include +// #include +// Targeting ../Conv1dImplBase.java -@Namespace("torch::nn::functional::detail") public static native @StdString BytePointer padding_unwrap(@ByVal kSame arg0); -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv1d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor bias, - @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride, - @Const @ByRef conv_padding_t1 padding, - @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer dilation, - @Cast("int64_t") long groups); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// Targeting ../ConvTranspose1dImplBaseBase.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv1d -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::Conv1dFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor conv1d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef(nullValue = "torch::nn::functional::Conv1dFuncOptions{}") Conv1dFuncOptions options); -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv2d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor bias, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride, - @Const @ByRef conv_padding_t2 padding, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer dilation, - @Cast("int64_t") long groups); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// Targeting ../Conv2dImplBase.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv2d -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::Conv2dFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1));
-/** }
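In Java, a hedged fragment of the same call, under the scaffolding above; Conv2dFuncOptions is assumed default-constructible, as its C++ counterpart is.

    Tensor x = randn(1, 3, 8, 8);   // (N, C_in, H, W)
    Tensor w = randn(16, 3, 3, 3);  // (C_out, C_in, kH, kW)
    Tensor y = conv2d(x, w, new Conv2dFuncOptions());  // defaults: stride 1, no padding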
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor conv2d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef(nullValue = "torch::nn::functional::Conv2dFuncOptions{}") Conv2dFuncOptions options); -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv3d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor bias, - @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer stride, - @Const @ByRef conv_padding_t3 padding, - @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer dilation, - @Cast("int64_t") long groups); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// Targeting ../ConvTranspose2dImplBaseBase.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv3d -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::Conv3dFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor conv3d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef(nullValue = "torch::nn::functional::Conv3dFuncOptions{}") Conv3dFuncOptions options); -// ============================================================================ +// Targeting ../Conv3dImplBase.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose1d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor bias, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, - @Cast("int64_t") long groups, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose1d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor bias, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, - @Cast("int64_t") long groups, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose1d -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::ConvTranspose1dFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor conv_transpose1d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef(nullValue = "torch::nn::functional::ConvTranspose1dFuncOptions{}") ConvTranspose1dFuncOptions options); +// Targeting ../ConvTranspose3dImplBaseBase.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose2d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor bias, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, - @Cast("int64_t") long groups, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose2d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor bias, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, - @Cast("int64_t") long groups, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose2d -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::ConvTranspose2dFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1));
-/** }
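The transposed variant as a fragment under the same assumptions; note the swapped weight layout.

    Tensor x = randn(1, 16, 8, 8);  // (N, C_in, H, W)
    Tensor w = randn(16, 8, 3, 3);  // (C_in, C_out / groups, kH, kW)
    Tensor y = conv_transpose2d(x, w, new ConvTranspose2dFuncOptions());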
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor conv_transpose2d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef(nullValue = "torch::nn::functional::ConvTranspose2dFuncOptions{}") ConvTranspose2dFuncOptions options); +// Targeting ../Conv1dImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose3d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor bias, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef stride, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef padding, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef output_padding, - @Cast("int64_t") long groups, - @ByVal @Cast("c10::ArrayRef*") LongArrayRef dilation); -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor conv_transpose3d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor bias, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] output_padding, - @Cast("int64_t") long groups, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dilation); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.conv_transpose3d -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::ConvTranspose3dFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor conv_transpose3d( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef(nullValue = "torch::nn::functional::ConvTranspose3dFuncOptions{}") ConvTranspose3dFuncOptions options); - // namespace functional +/** A {@code ModuleHolder} subclass for {@code Conv1dImpl}. + * See the documentation for {@code Conv1dImpl} class to learn what methods it + * provides, and examples of how to use {@code Conv1d} with + * {@code torch::nn::Conv1dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../Conv2dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code Conv2dImpl}. + * See the documentation for {@code Conv2dImpl} class to learn what methods it + * provides, and examples of how to use {@code Conv2d} with + * {@code torch::nn::Conv2dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../Conv3dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code Conv3dImpl}. + * See the documentation for {@code Conv3dImpl} class to learn what methods it + * provides, and examples of how to use {@code Conv3d} with + * {@code torch::nn::Conv3dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../ConvTranspose1dImplBase.java + + +// Targeting ../ConvTranspose2dImplBase.java + + +// Targeting ../ConvTranspose3dImplBase.java + + +// Targeting ../ConvTranspose1dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code ConvTranspose1dImpl}. + * See the documentation for {@code ConvTranspose1dImpl} class to learn what methods + * it provides, and examples of how to use {@code ConvTranspose1d} with + * {@code torch::nn::ConvTranspose1dOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../ConvTranspose2dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code ConvTranspose2dImpl}. + * See the documentation for {@code ConvTranspose2dImpl} class to learn what methods + * it provides, and examples of how to use {@code ConvTranspose2d} with + * {@code torch::nn::ConvTranspose2dOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../ConvTranspose3dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code ConvTranspose3dImpl}. + * See the documentation for {@code ConvTranspose3dImpl} class to learn what methods + * it provides, and examples of how to use {@code ConvTranspose3d} with + * {@code torch::nn::ConvTranspose3dOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ + // namespace nn // namespace torch -// Parsed from torch/nn/functional/distance.h +// Parsed from torch/nn/modules/distance.h // #pragma once +// #include +// #include // #include +// #include +// #include -// #ifndef DOXYGEN_SHOULD_SKIP_THIS - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// #include +// Targeting ../CosineSimilarityImpl.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_similarity -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::CosineSimilarityFuncOptions} class to learn what -/** optional arguments are supported for this functional. 
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::cosine_similarity(input1, input2,
-/** F::CosineSimilarityFuncOptions().dim(1));
-/** }
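A Java fragment of the same call, assuming CosineSimilarityOptions keeps its default constructor with dim = 1.

    Tensor a = randn(8, 32);
    Tensor b = randn(8, 32);
    Tensor sim = cosine_similarity(a, b, new CosineSimilarityOptions());  // reduces dim 1, result shape (8)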
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor cosine_similarity( - @Const @ByRef Tensor x1, - @Const @ByRef Tensor x2, - @Cast("const torch::nn::functional::CosineSimilarityFuncOptions*") @ByRef(nullValue = "torch::nn::functional::CosineSimilarityFuncOptions{}") CosineSimilarityOptions options); -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** A {@code ModuleHolder} subclass for {@code CosineSimilarityImpl}. + * See the documentation for {@code CosineSimilarityImpl} class to learn what methods + * it provides, and examples of how to use {@code CosineSimilarity} with + * {@code torch::nn::CosineSimilarityOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../PairwiseDistanceImpl.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pairwise_distance -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::PairwiseDistanceFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor pairwise_distance( - @Const @ByRef Tensor x1, - @Const @ByRef Tensor x2, - @Cast("const torch::nn::functional::PairwiseDistanceFuncOptions*") @ByRef(nullValue = "torch::nn::functional::PairwiseDistanceFuncOptions{}") PairwiseDistanceOptions options); -// ============================================================================ -/** Computes the p-norm distance between every pair of row vectors in the input. - * This function will be faster if the rows are contiguous. */ +/** A {@code ModuleHolder} subclass for {@code PairwiseDistanceImpl}. + * See the documentation for {@code PairwiseDistanceImpl} class to learn what methods + * it provides, and examples of how to use {@code PairwiseDistance} with + * {@code torch::nn::PairwiseDistanceOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ - // namespace functional // namespace nn // namespace torch -// Parsed from torch/nn/functional/dropout.h +// Parsed from torch/nn/modules/dropout.h // #pragma once +// #include // #include +// #include +// #include -// #include +// #include -// #ifndef DOXYGEN_SHOULD_SKIP_THIS +// #include +// #include +// Targeting ../DropoutImplBase.java -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor dropout(@ByVal Tensor input, double p, @Cast("bool") boolean training, @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// Targeting ../Dropout2dImplBase.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::DropoutFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::dropout(input, F::DropoutFuncOptions().p(0.5));
-/** }
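In Java, the no-options overload shown in this hunk suffices (fragment; same scaffolding as the threshold sketch).

    Tensor y = dropout(ones(2, 6));  // defaults: p = 0.5, training = true
    y.print();                       // about half the entries zeroed, survivors scaled by 2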
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout(@ByVal Tensor input, @Const @ByRef(nullValue = "torch::nn::functional::DropoutFuncOptions{}") DropoutFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout(@ByVal Tensor input); -// ============================================================================ +// Targeting ../Dropout3dImplBase.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor dropout2d(@ByVal Tensor input, double p, @Cast("bool") boolean training, @Cast("bool") boolean inplace); +// Targeting ../AlphaDropoutImplBase.java - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout2d -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::Dropout2dFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout2d( - @ByVal Tensor input, - @Cast("const torch::nn::functional::Dropout2dFuncOptions*") @ByRef(nullValue = "torch::nn::functional::Dropout2dFuncOptions{}") DropoutFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout2d( - @ByVal Tensor input); +// Targeting ../FeatureAlphaDropoutImplBase.java -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor dropout3d(@ByVal Tensor input, double p, @Cast("bool") boolean training, @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// Targeting ../DropoutImpl.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.dropout3d -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::Dropout3dFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout3d( - @ByVal Tensor input, - @Cast("const torch::nn::functional::Dropout3dFuncOptions*") @ByRef(nullValue = "torch::nn::functional::Dropout3dFuncOptions{}") DropoutFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor dropout3d( - @ByVal Tensor input); -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS +/** A {@code ModuleHolder} subclass for {@code DropoutImpl}. + * See the documentation for {@code DropoutImpl} class to learn what methods it + * provides, and examples of how to use {@code Dropout} with + * {@code torch::nn::DropoutOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../Dropout2dImpl.java -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor alpha_dropout( - @ByVal Tensor input, - double p, - @Cast("bool") boolean training, - @Cast("bool") boolean inplace); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.alpha_dropout -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::AlphaDropoutFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::alpha_dropout(input,
-/** F::AlphaDropoutFuncOptions().p(0.5).training(false));
-/** }
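A one-line Java fragment via the no-options overload visible in this hunk; unlike dropout, this functional defaults to training = false.

    Tensor y = alpha_dropout(randn(2, 6));  // defaults: p = 0.5, training = false (a no-op unless enabled)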
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor alpha_dropout( - @ByVal Tensor input, - @Const @ByRef(nullValue = "torch::nn::functional::AlphaDropoutFuncOptions{}") AlphaDropoutFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor alpha_dropout( - @ByVal Tensor input); +/** A {@code ModuleHolder} subclass for {@code Dropout2dImpl}. + * See the documentation for {@code Dropout2dImpl} class to learn what methods it + * provides, and examples of how to use {@code Dropout2d} with + * {@code torch::nn::Dropout2dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../Dropout3dImpl.java -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor feature_alpha_dropout( - @ByVal Tensor input, - double p, - @Cast("bool") boolean training, - @Cast("bool") boolean inplace); +/** A {@code ModuleHolder} subclass for {@code Dropout3dImpl}. + * See the documentation for {@code Dropout3dImpl} class to learn what methods it + * provides, and examples of how to use {@code Dropout3d} with + * {@code torch::nn::Dropout3dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../AlphaDropoutImpl.java - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.feature_alpha_dropout -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::FeatureAlphaDropoutFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::feature_alpha_dropout(input,
-/** F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor feature_alpha_dropout( - @ByVal Tensor input, - @Const @ByRef(nullValue = "torch::nn::functional::FeatureAlphaDropoutFuncOptions{}") FeatureAlphaDropoutFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor feature_alpha_dropout( - @ByVal Tensor input); - // namespace functional +/** A {@code ModuleHolder} subclass for {@code AlphaDropoutImpl}. + * See the documentation for {@code AlphaDropoutImpl} class to learn what methods it + * provides, and examples of how to use {@code AlphaDropout} with + * {@code torch::nn::AlphaDropoutOptions}. See the documentation for {@code ModuleHolder} + * to learn about PyTorch's module storage semantics. */ +// Targeting ../FeatureAlphaDropoutImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code FeatureAlphaDropoutImpl}. + * See the documentation for {@code FeatureAlphaDropoutImpl} class to learn what + * methods it provides, and examples of how to use {@code FeatureAlphaDropout} with + * {@code torch::nn::FeatureAlphaDropoutOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ + + // namespace nn + // namespace torch + + +// Parsed from torch/nn/modules/embedding.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include +// #include + +// #include +// Targeting ../EmbeddingImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code EmbeddingImpl}. + * See the documentation for {@code EmbeddingImpl} class to learn what methods it + * provides, and examples of how to use {@code Embedding} with + * {@code torch::nn::EmbeddingOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../EmbeddingBagImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code EmbeddingBagImpl}. + * See the documentation for {@code EmbeddingBagImpl} class to learn what methods it + * provides, and examples of how to use {@code EmbeddingBag} with + * {@code torch::nn::EmbeddingBagOptions}. See the documentation for {@code ModuleHolder} + * to learn about PyTorch's module storage semantics. */ // namespace nn // namespace torch -// Parsed from torch/nn/functional/embedding.h +// Parsed from torch/nn/modules/fold.h // #pragma once -// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../FoldImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native void _no_grad_embedding_renorm_( - @ByVal Tensor weight, - @Const @ByRef Tensor input, - float max_norm, - float norm_type); -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor embedding( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @ByVal LongOptional padding_idx, - @ByVal DoubleOptional max_norm, - double norm_type, - @Cast("bool") boolean scale_grad_by_freq, - @Cast("bool") boolean sparse); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::EmbeddingFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::embedding(input, weight,
-/** F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor embedding( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef(nullValue = "torch::nn::functional::EmbeddingFuncOptions{}") EmbeddingFuncOptions options); +/** A {@code ModuleHolder} subclass for {@code FoldImpl}. + * See the documentation for {@code FoldImpl} class to learn what methods it + * provides, and examples of how to use {@code Fold} with {@code torch::nn::FoldOptions}. + * See the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. */ +// Targeting ../UnfoldImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor embedding_bag( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor offsets, - @ByVal DoubleOptional max_norm, - double norm_type, - @Cast("bool") boolean scale_grad_by_freq, - @ByVal EmbeddingBagMode mode, - @Cast("bool") boolean sparse, - @Const @ByRef Tensor per_sample_weights, - @Cast("bool") boolean include_last_offset, - @ByVal LongOptional padding_idx); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.embedding_bag -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::EmbeddingBagFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::embedding_bag(input, weight,
-/** F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets));
-/** }
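The module classes added in this hunk can also be exercised directly from Java; this fragment assumes the (num_embeddings, embedding_dim) convenience constructor of EmbeddingImpl and a generated weight() accessor for the module's public weight field.

    EmbeddingImpl emb = new EmbeddingImpl(10, 3);  // 10-entry lookup table of 3-dim vectors
    emb.weight().print();                          // randomly initialized weights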
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor embedding_bag( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef(nullValue = "torch::nn::functional::EmbeddingBagFuncOptions{}") EmbeddingBagFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor embedding_bag( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight); - // namespace functional +/** A {@code ModuleHolder} subclass for {@code UnfoldImpl}. + * See the documentation for {@code UnfoldImpl} class to learn what methods it + * provides, and examples of how to use {@code Unfold} with + * {@code torch::nn::UnfoldOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ + // namespace nn // namespace torch -// Parsed from torch/nn/functional/fold.h +// Parsed from torch/nn/modules/instancenorm.h // #pragma once -// #include +// #include +// #include +// Targeting ../InstanceNorm1dImplBase.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor fold( - @Const @ByRef Tensor input, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer output_size, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer dilation, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.fold -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::FoldFuncOptions} class to -/** learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2}));
-/** }
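A Java fragment for fold; it assumes the usual presets mapping of ExpandingArray<2> parameters to two-element org.bytedeco.javacpp.LongPointer values in the FoldOptions constructor.

    Tensor blocks = randn(1, 4, 2);  // (N, C * kH * kW, L) for a 2x2 kernel over a 3x2 output
    Tensor out = fold(blocks, new FoldOptions(new LongPointer(3, 2),     // output size {3, 2}
                                              new LongPointer(2, 2)));   // kernel size {2, 2}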
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor fold(@Const @ByRef Tensor input, @Cast("const torch::nn::functional::FoldFuncOptions*") @ByRef FoldOptions options); +// Targeting ../InstanceNorm2dImplBase.java -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor unfold( - @Const @ByRef Tensor input, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer dilation, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// Targeting ../InstanceNorm3dImplBase.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.unfold -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::UnfoldFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor unfold(@Const @ByRef Tensor input, @Cast("const torch::nn::functional::UnfoldFuncOptions*") @ByRef UnfoldOptions options); - // namespace functional - // namespace nn - // namespace torch +// Targeting ../InstanceNorm1dImpl.java -// Parsed from torch/nn/functional/linear.h -// #pragma once +/** A {@code ModuleHolder} subclass for {@code InstanceNorm1dImpl}. + * See the documentation for {@code InstanceNorm1dImpl} class to learn what methods + * it provides, and examples of how to use {@code InstanceNorm1d} with + * {@code torch::nn::InstanceNorm1dOptions}. See the documentation for {@code ModuleHolder} + * to learn about PyTorch's module storage semantics. */ +// Targeting ../InstanceNorm2dImpl.java -// #include -@Namespace("torch::nn::functional") public static native @ByVal Tensor bilinear( - @Const @ByRef Tensor input1, - @Const @ByRef Tensor input2, - @Const @ByRef Tensor weight, - @Const @ByRef(nullValue = "at::Tensor()") Tensor bias); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code InstanceNorm2dImpl}. + * See the documentation for {@code InstanceNorm2dImpl} class to learn what methods + * it provides, and examples of how to use {@code InstanceNorm2d} with + * {@code torch::nn::InstanceNorm2dOptions}. See the documentation for {@code ModuleHolder} + * to learn about PyTorch's module storage semantics. */ +// Targeting ../InstanceNorm3dImpl.java -@Namespace("torch::nn::functional") public static native @ByVal Tensor linear( - @Const @ByRef Tensor input, - @Const @ByRef Tensor weight, - @Const @ByRef(nullValue = "at::Tensor{}") Tensor bias); - // namespace functional + +/** A {@code ModuleHolder} subclass for {@code InstanceNorm3dImpl}. + * See the documentation for {@code InstanceNorm3dImpl} class to learn what methods + * it provides, and examples of how to use {@code InstanceNorm3d} with + * {@code torch::nn::InstanceNorm3dOptions}. See the documentation for {@code ModuleHolder} + * to learn about PyTorch's module storage semantics. */ + // namespace nn // namespace torch -// Parsed from torch/nn/functional/loss.h +// Parsed from torch/nn/modules/loss.h // #pragma once -// #include -// #include +// #include +// #include +// #include // #include +// #include +// #include -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor l1_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ - -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.l1_loss -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::L1LossFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone));
-/** }
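A Java fragment using the binding shown in this hunk, assuming L1LossOptions is default-constructible.

    Tensor loss = l1_loss(randn(3, 4), randn(3, 4), new L1LossOptions());  // default reduction: mean
    loss.print();  // scalar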
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor l1_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::L1LossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::L1LossFuncOptions{}") L1LossOptions options); - -// ============================================================================ - -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor kl_div( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @ByVal kldiv_loss_reduction_t reduction, - @Cast("bool") boolean log_target/*=false*/); -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor kl_div( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @ByVal kldiv_loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ - -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.kl_div -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::KLDivFuncOptions} class to -/** learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::kl_div(input, target,
-/** F::KLDivFuncOptions().reduction(torch::kNone).log_target(false));
-/** }
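A Java fragment of the same call; the softmax/log_softmax(Tensor, long) overloads are assumed from the generated global class.

    Tensor input  = log_softmax(randn(3, 5), 1);  // kl_div expects log-probabilities as input
    Tensor target = softmax(randn(3, 5), 1);      // and a probability distribution as target
    Tensor loss   = kl_div(input, target, new KLDivLossOptions());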
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor kl_div( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::KLDivFuncOptions*") @ByRef(nullValue = "torch::nn::functional::KLDivFuncOptions{}") KLDivLossOptions options); +// #include -// ============================================================================ +// #include +// #include +// Targeting ../L1LossImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor mse_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.mse_loss -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::MSELossFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor mse_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::MSELossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::MSELossFuncOptions{}") MSELossOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code L1LossImpl}. + * See the documentation for {@code L1LossImpl} class to learn what methods it + * provides, and examples of how to use {@code L1Loss} with + * {@code torch::nn::L1LossOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../KLDivLossImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor binary_cross_entropy( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Const @ByRef Tensor weight, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::BinaryCrossEntropyFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::binary_cross_entropy(input, target,
-/** F::BinaryCrossEntropyFuncOptions().weight(weight));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor binary_cross_entropy( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::BinaryCrossEntropyFuncOptions*") @ByRef(nullValue = "torch::nn::functional::BinaryCrossEntropyFuncOptions{}") BCELossOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code KLDivLossImpl}. + * See the documentation for {@code KLDivLossImpl} class to learn what methods it + * provides, and examples of how to use {@code KLDivLoss} with + * {@code torch::nn::KLDivLossOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../MSELossImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor hinge_embedding_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - double margin, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.hinge_embedding_loss -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::HingeEmbeddingLossFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::hinge_embedding_loss(input, target,
-/** F::HingeEmbeddingLossFuncOptions().margin(2));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor hinge_embedding_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::HingeEmbeddingLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::HingeEmbeddingLossFuncOptions{}") HingeEmbeddingLossOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code MSELossImpl}. + * See the documentation for {@code MSELossImpl} class to learn what methods it + * provides, and examples of how to use {@code MSELoss} with + * {@code torch::nn::MSELossOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../BCELossImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor multi_margin_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("int64_t") long p, - double margin, - @Const @ByRef Tensor weight, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multi_margin_loss -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::MultiMarginLossFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::multi_margin_loss(input, target,
-/** F::MultiMarginLossFuncOptions().margin(2).weight(weight));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor multi_margin_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::MultiMarginLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::MultiMarginLossFuncOptions{}") MultiMarginLossOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code BCELossImpl}. + * See the documentation for {@code BCELossImpl} class to learn what methods it + * provides, and examples of how to use {@code BCELoss} with + * {@code torch::nn::BCELossOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../HingeEmbeddingLossImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor cosine_embedding_loss( - @Const @ByRef Tensor input1, - @Const @ByRef Tensor input2, - @Const @ByRef Tensor target, - double margin, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cosine_embedding_loss -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::CosineEmbeddingLossFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::cosine_embedding_loss(input1, input2, target,
-/** F::CosineEmbeddingLossFuncOptions().margin(0.5));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor cosine_embedding_loss( - @Const @ByRef Tensor input1, - @Const @ByRef Tensor input2, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::CosineEmbeddingLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::CosineEmbeddingLossFuncOptions{}") CosineEmbeddingLossOptions options); -// ============================================================================ +/** A {@code ModuleHolder} subclass for {@code HingeEmbeddingLossImpl}. + * See the documentation for {@code HingeEmbeddingLossImpl} class to learn what + * methods it provides, and examples of how to use {@code HingeEmbeddingLoss} with + * {@code torch::nn::HingeEmbeddingLossOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../MultiMarginLossImpl.java -@Namespace("torch::nn::functional") public static native @ByVal Tensor _smooth_l1_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - double beta/*=1.*/); -@Namespace("torch::nn::functional") public static native @ByVal Tensor _smooth_l1_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target); -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor smooth_l1_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @ByVal loss_reduction_t reduction, - double beta/*=1.*/); -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor smooth_l1_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.smooth_l1_loss -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::SmoothL1LossFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor smooth_l1_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::SmoothL1LossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SmoothL1LossFuncOptions{}") SmoothL1LossOptions options, - double beta/*=1.*/); +/** A {@code ModuleHolder} subclass for {@code MultiMarginLossImpl}. + * See the documentation for {@code MultiMarginLossImpl} class to learn what methods + * it provides, and examples of how to use {@code MultiMarginLoss} with + * {@code torch::nn::MultiMarginLossOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../CosineEmbeddingLossImpl.java -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor huber_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @ByVal loss_reduction_t reduction, - double delta/*=1.*/); -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor huber_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.huber_loss -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::HuberLossFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::huber_loss(input, target,
-/** F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor huber_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::HuberLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::HuberLossFuncOptions{}") HuberLossOptions options); +/** A {@code ModuleHolder} subclass for {@code CosineEmbeddingLossImpl}. + * See the documentation for {@code CosineEmbeddingLossImpl} class to learn what + * methods it provides, and examples of how to use {@code CosineEmbeddingLoss} with + * {@code torch::nn::CosineEmbeddingLossOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../SmoothL1LossImpl.java -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor multilabel_margin_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_margin_loss -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::MultilabelMarginLossFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::multilabel_margin_loss(input, target,
-/** F::MultilabelMarginLossFuncOptions(torch::kNone));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor multilabel_margin_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::MultilabelMarginLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::MultilabelMarginLossFuncOptions{}") MultiLabelMarginLossOptions options); +/** A {@code ModuleHolder} subclass for {@code SmoothL1LossImpl}. + * See the documentation for {@code SmoothL1LossImpl} class to learn what methods it + * provides, and examples of how to use {@code SmoothL1Loss} with + * {@code torch::nn::SmoothL1LossOptions}. See the documentation for {@code ModuleHolder} + * to learn about PyTorch's module storage semantics. */ +// Targeting ../HuberLossImpl.java -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor soft_margin_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.soft_margin_loss -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::SoftMarginLossFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::soft_margin_loss(input, target,
-/** F::SoftMarginLossFuncOptions(torch::kNone));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor soft_margin_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Cast("const torch::nn::functional::SoftMarginLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::SoftMarginLossFuncOptions{}") SoftMarginLossOptions options); +/** A {@code ModuleHolder} subclass for {@code HuberLossImpl}. + * See the documentation for {@code HuberLossImpl} class to learn what methods it + * provides, and examples of how to use {@code HuberLoss} with + * {@code torch::nn::HuberLossOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../MultiLabelMarginLossImpl.java -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor multilabel_soft_margin_loss( - @Const @ByRef Tensor input, - @Const @ByRef Tensor target, - @Const @ByRef Tensor weight, - @ByVal loss_reduction_t reduction); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.multilabel_soft_margin_loss -/** about the exact behavior of this functional. -/** -/** See the documentation for -/** {@code torch::nn::functional::MultilabelSoftMarginLossFuncOptions} class to learn -/** what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::multilabel_soft_margin_loss(input, target,
-/** F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor multilabel_soft_margin_loss(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target,
-    @Cast("const torch::nn::functional::MultilabelSoftMarginLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::MultilabelSoftMarginLossFuncOptions{}") MultiLabelSoftMarginLossOptions options);
-@Namespace("torch::nn::functional") public static native @ByVal Tensor multilabel_soft_margin_loss(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target);
+/** A {@code ModuleHolder} subclass for {@code MultiLabelMarginLossImpl}.
+ * See the documentation for {@code MultiLabelMarginLossImpl} class to learn what
+ * methods it provides, and examples of how to use {@code MultiLabelMarginLoss} with
+ * {@code torch::nn::MultiLabelMarginLossOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../SoftMarginLossImpl.java
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor triplet_margin_loss(
-    @Const @ByRef Tensor anchor,
-    @Const @ByRef Tensor positive,
-    @Const @ByRef Tensor negative,
-    double margin,
-    double p,
-    double eps,
-    @Cast("bool") boolean swap,
-    @ByVal loss_reduction_t reduction);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_loss
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::TripletMarginLossFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::triplet_margin_loss(anchor, positive, negative,
-/** F::TripletMarginLossFuncOptions().margin(1.0));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor triplet_margin_loss(
-    @Const @ByRef Tensor anchor,
-    @Const @ByRef Tensor positive,
-    @Const @ByRef Tensor negative,
-    @Cast("const torch::nn::functional::TripletMarginLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::TripletMarginLossFuncOptions{}") TripletMarginLossOptions options);
+/** A {@code ModuleHolder} subclass for {@code SoftMarginLossImpl}.
+ * See the documentation for {@code SoftMarginLossImpl} class to learn what methods
+ * it provides, and examples of how to use {@code SoftMarginLoss} with
+ * {@code torch::nn::SoftMarginLossOptions}. See the documentation for {@code ModuleHolder}
+ * to learn about PyTorch's module storage semantics. */
+// Targeting ../MultiLabelSoftMarginLossImpl.java
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor triplet_margin_with_distance_loss(
-    @Const @ByRef Tensor anchor,
-    @Const @ByRef Tensor positive,
-    @Const @ByRef Tensor negative,
-    @ByVal @Cast("c10::optional*") Pointer distance_function,
-    double margin,
-    @Cast("bool") boolean swap,
-    @ByVal loss_reduction_t reduction);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.triplet_margin_with_distance_loss
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::TripletMarginWithDistanceLossFuncOptions} class to
-/** learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::triplet_margin_with_distance_loss(anchor, positive, negative,
-/** F::TripletMarginWithDistanceLossFuncOptions().margin(1.0));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor triplet_margin_with_distance_loss(
-    @Const @ByRef Tensor anchor,
-    @Const @ByRef Tensor positive,
-    @Const @ByRef Tensor negative,
-    @Cast("const torch::nn::functional::TripletMarginWithDistanceLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::TripletMarginWithDistanceLossFuncOptions{}") TripletMarginWithDistanceLossOptions options);
-@Namespace("torch::nn::functional") public static native @ByVal Tensor triplet_margin_with_distance_loss(
-    @Const @ByRef Tensor anchor,
-    @Const @ByRef Tensor positive,
-    @Const @ByRef Tensor negative);
+/** A {@code ModuleHolder} subclass for {@code MultiLabelSoftMarginLossImpl}.
+ * See the documentation for {@code MultiLabelSoftMarginLossImpl} class to learn what
+ * methods it provides, and examples of how to use {@code MultiLabelSoftMarginLoss}
+ * with {@code torch::nn::MultiLabelSoftMarginLossOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../TripletMarginLossImpl.java
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor ctc_loss(
-    @Const @ByRef Tensor log_probs,
-    @Const @ByRef Tensor targets,
-    @Const @ByRef Tensor input_lengths,
-    @Const @ByRef Tensor target_lengths,
-    @Cast("int64_t") long blank,
-    @ByVal loss_reduction_t reduction,
-    @Cast("bool") boolean zero_infinity);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.ctc_loss
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::CTCLossFuncOptions} class
-/** to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
-/** F::CTCLossFuncOptions().reduction(torch::kNone));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor ctc_loss(
-    @Const @ByRef Tensor log_probs,
-    @Const @ByRef Tensor targets,
-    @Const @ByRef Tensor input_lengths,
-    @Const @ByRef Tensor target_lengths,
-    @Cast("const torch::nn::functional::CTCLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::CTCLossFuncOptions{}") CTCLossOptions options);
+/** A {@code ModuleHolder} subclass for {@code TripletMarginLossImpl}.
+ * See the documentation for {@code TripletMarginLossImpl} class to learn what
+ * methods it provides, and examples of how to use {@code TripletMarginLoss} with
+ * {@code torch::nn::TripletMarginLossOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../TripletMarginWithDistanceLossImpl.java
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor poisson_nll_loss(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target,
-    @Cast("bool") boolean log_input,
-    @Cast("bool") boolean full,
-    double eps,
-    @ByVal loss_reduction_t reduction);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.poisson_nll_loss
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::PoissonNLLLossFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::poisson_nll_loss(input, target,
-/** F::PoissonNLLLossFuncOptions().reduction(torch::kNone));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor poisson_nll_loss(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target,
-    @Cast("const torch::nn::functional::PoissonNLLLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::PoissonNLLLossFuncOptions{}") PoissonNLLLossOptions options);
-@Namespace("torch::nn::functional") public static native @ByVal Tensor poisson_nll_loss(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target);
+/** A {@code ModuleHolder} subclass for {@code TripletMarginWithDistanceLossImpl}.
+ * See the documentation for {@code TripletMarginWithDistanceLossImpl} class to learn
+ * what methods it provides, and examples of how to use
+ * {@code TripletMarginWithDistanceLoss} with
+ * {@code torch::nn::TripletMarginWithDistanceLossOptions}.
+ * See the documentation for {@code ModuleHolder} to learn about PyTorch's
+ * module storage semantics. */
+// Targeting ../CTCLossImpl.java
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor margin_ranking_loss(
-    @Const @ByRef Tensor input1,
-    @Const @ByRef Tensor input2,
-    @Const @ByRef Tensor target,
-    double margin,
-    @ByVal loss_reduction_t reduction);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.margin_ranking_loss
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::MarginRankingLossFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::margin_ranking_loss(input1, input2, target,
-/** F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor margin_ranking_loss(
-    @Const @ByRef Tensor input1,
-    @Const @ByRef Tensor input2,
-    @Const @ByRef Tensor target,
-    @Cast("const torch::nn::functional::MarginRankingLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::MarginRankingLossFuncOptions{}") MarginRankingLossOptions options);
+/** A {@code ModuleHolder} subclass for {@code CTCLossImpl}.
+ * See the documentation for {@code CTCLossImpl} class to learn what methods it
+ * provides, and examples of how to use {@code CTCLoss} with
+ * {@code torch::nn::CTCLossOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
+// Targeting ../PoissonNLLLossImpl.java
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor nll_loss(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target,
-    @Const @ByRef Tensor weight,
-    @Cast("int64_t") long ignore_index,
-    @Const @ByVal loss_reduction_t reduction);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.nll_loss
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::NLLLossFuncOptions} class
-/** to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::nll_loss(input, target,
-/** F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor nll_loss(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target,
-    @Cast("const torch::nn::functional::NLLLossFuncOptions*") @ByRef(nullValue = "torch::nn::functional::NLLLossFuncOptions{}") NLLLossOptions options);
+/** A {@code ModuleHolder} subclass for {@code PoissonNLLLossImpl}.
+ * See the documentation for {@code PoissonNLLLossImpl} class to learn what methods
+ * it provides, and examples of how to use {@code PoissonNLLLoss} with
+ * {@code torch::nn::PoissonNLLLossOptions}. See the documentation for {@code ModuleHolder}
+ * to learn about PyTorch's module storage semantics. */
+// Targeting ../MarginRankingLossImpl.java
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor cross_entropy(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target,
-    @Const @ByRef Tensor weight,
-    @Cast("int64_t") long ignore_index,
-    @ByVal loss_reduction_t reduction,
-    double label_smoothing);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.cross_entropy
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::CrossEntropyFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::cross_entropy(input, target,
-/** F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor cross_entropy(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target,
-    @Cast("const torch::nn::functional::CrossEntropyFuncOptions*") @ByRef(nullValue = "torch::nn::functional::CrossEntropyFuncOptions{}") CrossEntropyLossOptions options);
-@Namespace("torch::nn::functional") public static native @ByVal Tensor cross_entropy(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target);
+/** A {@code ModuleHolder} subclass for {@code MarginRankingLossImpl}.
+ * See the documentation for {@code MarginRankingLossImpl} class to learn what
+ * methods it provides, and examples of how to use {@code MarginRankingLoss} with
+ * {@code torch::nn::MarginRankingLossOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../NLLLossImpl.java
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor binary_cross_entropy_with_logits(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target,
-    @Const @ByRef Tensor weight,
-    @ByVal loss_reduction_t reduction,
-    @Const @ByRef Tensor pos_weight);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.binary_cross_entropy_with_logits
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions} class to
-/** learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::binary_cross_entropy_with_logits(input, target,
-/** F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor binary_cross_entropy_with_logits(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor target,
-    @Cast("const torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions*") @ByRef(nullValue = "torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions{}") BCEWithLogitsLossOptions options);
+/** A {@code ModuleHolder} subclass for {@code NLLLossImpl}.
+ * See the documentation for {@code NLLLossImpl} class to learn what methods it
+ * provides, and examples of how to use {@code NLLLoss} with
+ * {@code torch::nn::NLLLossOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
+// Targeting ../CrossEntropyLossImpl.java
+
+
+
+/** A {@code ModuleHolder} subclass for {@code CrossEntropyLossImpl}.
+ * See the documentation for {@code CrossEntropyLossImpl} class to learn what methods
+ * it provides, and examples of how to use {@code CrossEntropyLoss} with
+ * {@code torch::nn::CrossEntropyLossOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../BCEWithLogitsLossImpl.java
+
+
+
+/** A {@code ModuleHolder} subclass for {@code BCEWithLogitsLossImpl}.
+ * See the documentation for {@code BCEWithLogitsLossImpl} class to learn what
+ * methods it provides, and examples of how to use {@code BCEWithLogitsLoss} with
+ * {@code torch::nn::BCEWithLogitsLossOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
- // namespace functional
- // namespace nn
- // namespace torch
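// A brief usage sketch for the regenerated loss modules above. It assumes the
// Impl classes keep a no-arg constructor and a forward(Tensor, Tensor) method
// mirroring torch::nn, and that the usual long... factory overloads (randn,
// rand) exist in org.bytedeco.pytorch.global.torch; treat it as a sketch, not
// the canonical API.
import org.bytedeco.pytorch.BCEWithLogitsLossImpl;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class BCEWithLogitsLossSketch {
    public static void main(String[] args) {
        Tensor logits = randn(3, 4);               // unnormalized scores
        Tensor target = rand(3, 4);                // targets in [0, 1]
        BCEWithLogitsLossImpl bce = new BCEWithLogitsLossImpl(); // assumed default ctor
        Tensor loss = bce.forward(logits, target); // scalar tensor (mean reduction by default)
        loss.print();
    }
}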
-// Parsed from torch/nn/functional/normalization.h
+// Parsed from torch/nn/modules/_functions.h
 // #pragma once
-// #include
-// #include
+// #include
+// #include
 // #include
 // #include
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor normalize(
-    @Const @ByRef Tensor input,
-    double p,
-    @Cast("int64_t") long dim,
-    double eps,
-    @ByVal TensorOptional out);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+ // namespace functions
+ // namespace nn
+ // namespace torch
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.normalize
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::NormalizeFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor normalize(
-    @Const @ByRef Tensor input,
-    @ByVal(nullValue = "torch::nn::functional::NormalizeFuncOptions{}") NormalizeFuncOptions options);
-@Namespace("torch::nn::functional") public static native @ByVal Tensor normalize(
-    @Const @ByRef Tensor input);
-// ============================================================================
+// Parsed from torch/nn/modules/normalization.h
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor layer_norm(
-    @Const @ByRef Tensor input,
-    @Cast("const std::vector*") @ByRef LongVector normalized_shape,
-    @Const @ByRef Tensor weight,
-    @Const @ByRef Tensor bias,
-    double eps);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+// #pragma once
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.layer_norm
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::LayerNormFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor layer_norm(
-    @Const @ByRef Tensor input,
-    @Const @ByRef LayerNormFuncOptions options);
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
-// ============================================================================
+// #include
+// #include
+// Targeting ../LayerNormImpl.java
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor local_response_norm(
-    @Const @ByRef Tensor input,
-    @Cast("int64_t") long size,
-    double alpha,
-    double beta,
-    double k);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.local_response_norm
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::LocalResponseNormFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor local_response_norm(
-    @Const @ByRef Tensor input,
-    @Cast("const torch::nn::functional::LocalResponseNormFuncOptions*") @ByRef LocalResponseNormOptions options);
-// ============================================================================
+/** A {@code ModuleHolder} subclass for {@code LayerNormImpl}.
+ * See the documentation for {@code LayerNormImpl} class to learn what methods it
+ * provides, and examples of how to use {@code LayerNorm} with
+ * {@code torch::nn::LayerNormOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
+// Targeting ../LocalResponseNormImpl.java
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor group_norm(
-    @Const @ByRef Tensor input,
-    @Cast("int64_t") long num_groups,
-    @Const @ByRef Tensor weight,
-    @Const @ByRef Tensor bias,
-    double eps);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.group_norm
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::GroupNormFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor group_norm(
-    @Const @ByRef Tensor input,
-    @Const @ByRef GroupNormFuncOptions options);
- // namespace functional
- // namespace nn
- // namespace torch
+/** A {@code ModuleHolder} subclass for {@code LocalResponseNormImpl}.
+ * See the documentation for {@code LocalResponseNormImpl} class to learn what
+ * methods it provides, and examples of how to use {@code LocalResponseNorm} with
+ * {@code torch::nn::LocalResponseNormOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../CrossMapLRN2dImpl.java
-// Parsed from torch/nn/functional/padding.h
-// #pragma once
+/** A {@code ModuleHolder} subclass for {@code CrossMapLRN2dImpl}.
+ * See the documentation for {@code CrossMapLRN2dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code CrossMapLRN2d} with
+ * {@code torch::nn::CrossMapLRN2dOptions}. See the documentation for {@code ModuleHolder}
+ * to learn about PyTorch's module storage semantics. */
+// Targeting ../GroupNormImpl.java
-// #include
-// #include
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor pad(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("c10::ArrayRef*") LongArrayRef pad,
-    @ByVal pad_mode_t mode,
-    double value);
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor pad(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] pad,
-    @ByVal pad_mode_t mode,
-    double value);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pad
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::PadFuncOptions} class to
-/** learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1,
-/** 2}).mode(torch::kReplicate));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor pad(@Const @ByRef Tensor input, @Const @ByRef PadFuncOptions options);
+/** A {@code ModuleHolder} subclass for {@code GroupNormImpl}.
+ * See the documentation for {@code GroupNormImpl} class to learn what methods it
+ * provides, and examples of how to use {@code GroupNorm} with
+ * {@code torch::nn::GroupNormOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
- // namespace functional
- // namespace nn
- // namespace torch
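// A brief usage sketch for the normalization modules above, assuming
// GroupNormOptions keeps its (num_groups, num_channels) constructor from
// torch::nn and that the long... randn factory overload exists; a sketch
// under those assumptions, not a definitive reference.
import org.bytedeco.pytorch.GroupNormImpl;
import org.bytedeco.pytorch.GroupNormOptions;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class GroupNormSketch {
    public static void main(String[] args) {
        GroupNormImpl gn = new GroupNormImpl(new GroupNormOptions(2, 4)); // 2 groups over 4 channels
        Tensor x = randn(8, 4, 16, 16);  // N=8, C=4, H=W=16
        Tensor y = gn.forward(x);        // same shape, normalized per group
        y.print();
    }
}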
-// Parsed from torch/nn/functional/pixelshuffle.h
+// Parsed from torch/nn/modules/padding.h
 // #pragma once
-// #include
+// #include
+// #include
+// #include
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+// #include
+// Targeting ../ReflectionPad1dImplBase.java
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.pixel_shuffle
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::PixelShuffleFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor pixel_shuffle(
-    @Const @ByRef Tensor input,
-    @Cast("const torch::nn::functional::PixelShuffleFuncOptions*") @ByRef PixelShuffleOptions options);
-@Namespace("torch::nn::functional") public static native @ByVal Tensor pixel_unshuffle(
-    @Const @ByRef Tensor input,
-    @Cast("const torch::nn::functional::PixelUnshuffleFuncOptions*") @ByRef PixelUnshuffleOptions options);
+// Targeting ../ReflectionPad2dImplBase.java
+
+// Targeting ../ReflectionPad3dImplBase.java
+
+// Targeting ../ReflectionPad1dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code ReflectionPad1dImpl}.
+ * See the documentation for {@code ReflectionPad1dImpl} class to learn what methods
+ * it provides, and examples of how to use {@code ReflectionPad1d} with
+ * {@code torch::nn::ReflectionPad1dOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../ReflectionPad2dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code ReflectionPad2dImpl}.
+ * See the documentation for {@code ReflectionPad2dImpl} class to learn what methods
+ * it provides, and examples of how to use {@code ReflectionPad2d} with
+ * {@code torch::nn::ReflectionPad2dOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../ReflectionPad3dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code ReflectionPad3dImpl}.
+ * See the documentation for {@code ReflectionPad3dImpl} class to learn what methods
+ * it provides, and examples of how to use {@code ReflectionPad3d} with
+ * {@code torch::nn::ReflectionPad3dOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../ReplicationPad1dImplBase.java
+
+// Targeting ../ReplicationPad2dImplBase.java
+
+// Targeting ../ReplicationPad3dImplBase.java
+
+// Targeting ../ReplicationPad1dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code ReplicationPad1dImpl}.
+ * See the documentation for {@code ReplicationPad1dImpl} class to learn what methods
+ * it provides, and examples of how to use {@code ReplicationPad1d} with
+ * {@code torch::nn::ReplicationPad1dOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../ReplicationPad2dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code ReplicationPad2dImpl}.
+ * See the documentation for {@code ReplicationPad2dImpl} class to learn what methods
+ * it provides, and examples of how to use {@code ReplicationPad2d} with
+ * {@code torch::nn::ReplicationPad2dOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../ReplicationPad3dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code ReplicationPad3dImpl}.
+ * See the documentation for {@code ReplicationPad3dImpl} class to learn what methods
+ * it provides, and examples of how to use {@code ReplicationPad3d} with
+ * {@code torch::nn::ReplicationPad3dOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../ZeroPad2dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code ZeroPad2dImpl}.
+ * See the documentation for {@code ZeroPad2dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code ZeroPad2d} with
+ * {@code torch::nn::ZeroPad2dOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
+// Targeting ../ConstantPad1dImplBase.java
+
+// Targeting ../ConstantPad2dImplBase.java
+
+// Targeting ../ConstantPad3dImplBase.java
+
+// Targeting ../ConstantPad1dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code ConstantPad1dImpl}.
+ * See the documentation for {@code ConstantPad1dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code ConstantPad1d} with
+ * {@code torch::nn::ConstantPad1dOptions}. See the documentation for {@code ModuleHolder}
+ * to learn about PyTorch's module storage semantics. */
+// Targeting ../ConstantPad2dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code ConstantPad2dImpl}.
+ * See the documentation for {@code ConstantPad2dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code ConstantPad2d} with
+ * {@code torch::nn::ConstantPad2dOptions}. See the documentation for {@code ModuleHolder}
+ * to learn about PyTorch's module storage semantics. */
+// Targeting ../ConstantPad3dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code ConstantPad3dImpl}.
+ * See the documentation for {@code ConstantPad3dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code ConstantPad3d} with
+ * {@code torch::nn::ConstantPad3dOptions}. See the documentation for {@code ModuleHolder}
+ * to learn about PyTorch's module storage semantics. */
- // namespace functional
- // namespace nn
- // namespace torch
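// A brief usage sketch for the padding modules above. The ExpandingArray<4>
// option is assumed to map to a 4-element LongPointer, matching the @Cast
// convention used for the functional signatures in this file; a sketch, not
// the canonical API.
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.ZeroPad2dImpl;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ZeroPad2dSketch {
    public static void main(String[] args) {
        // padding order is {left, right, top, bottom}
        ZeroPad2dImpl pad = new ZeroPad2dImpl(new LongPointer(1, 1, 2, 2)); // assumed ctor mapping
        Tensor x = ones(1, 1, 3, 3);
        Tensor y = pad.forward(x);   // -> shape (1, 1, 7, 5)
        y.print();
    }
}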
-// Parsed from torch/nn/functional/pooling.h
+// Parsed from torch/nn/modules/pixelshuffle.h
+
+// #pragma once
+
+// #include
+// #include
+// #include
+
+// #include
+// Targeting ../PixelShuffleImpl.java
-// #pragma once
-// #include
-// #include
-// #include
-// #include
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor avg_pool1d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer padding,
-    @Cast("bool") boolean ceil_mode,
-    @Cast("bool") boolean count_include_pad);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+/** A {@code ModuleHolder} subclass for {@code PixelShuffleImpl}.
+ * See the documentation for {@code PixelShuffleImpl} class to learn what methods it
+ * provides, and examples of how to use {@code PixelShuffle} with
+ * {@code torch::nn::PixelShuffleOptions}. See the documentation for {@code ModuleHolder}
+ * to learn about PyTorch's module storage semantics. */
+// Targeting ../PixelUnshuffleImpl.java
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool1d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::AvgPool1dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor avg_pool1d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AvgPool1dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor avg_pool2d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding,
-    @Cast("bool") boolean ceil_mode,
-    @Cast("bool") boolean count_include_pad,
-    @ByVal LongOptional divisor_override);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool2d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::AvgPool2dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor avg_pool2d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AvgPool2dOptions options);
+/** A {@code ModuleHolder} subclass for {@code PixelUnshuffleImpl}.
+ * See the documentation for {@code PixelUnshuffleImpl} class to learn what methods
+ * it provides, and examples of how to use {@code PixelUnshuffle} with
+ * {@code torch::nn::PixelUnshuffleOptions}. See the documentation for {@code ModuleHolder}
+ * to learn about PyTorch's module storage semantics. */
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor avg_pool3d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer padding,
-    @Cast("bool") boolean ceil_mode,
-    @Cast("bool") boolean count_include_pad,
-    @ByVal LongOptional divisor_override);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+ // namespace nn
+ // namespace torch
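// A brief usage sketch for the pixel-shuffle modules above, assuming
// PixelShuffleOptions keeps its single upscale_factor constructor:
// (N, C*r*r, H, W) is rearranged into (N, C, H*r, W*r).
import org.bytedeco.pytorch.PixelShuffleImpl;
import org.bytedeco.pytorch.PixelShuffleOptions;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class PixelShuffleSketch {
    public static void main(String[] args) {
        PixelShuffleImpl shuffle = new PixelShuffleImpl(new PixelShuffleOptions(2)); // r = 2
        Tensor x = randn(1, 4, 8, 8);  // C*r*r = 4, so C = 1
        Tensor y = shuffle.forward(x); // -> shape (1, 1, 16, 16)
        y.print();
    }
}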
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.avg_pool3d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::AvgPool3dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor avg_pool3d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AvgPool3dOptions options);
-// ============================================================================
+// Parsed from torch/nn/modules/pooling.h
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_pool1d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer padding,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer dilation,
-    @Cast("bool") boolean ceil_mode);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+// #pragma once
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool1d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::MaxPool1dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor max_pool1d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef MaxPool1dOptions options);
+// #include
+// #include
+// #include
+// #include
+// #include
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal TensorTensorTuple max_pool1d_with_indices(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer padding,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer dilation,
-    @Cast("bool") boolean ceil_mode);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+// #include
+// Targeting ../AvgPool1dImplBase.java
-/** See the documentation for {@code torch::nn::functional::MaxPool1dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::max_pool1d_with_indices(x, F::MaxPool1dFuncOptions(3).stride(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal TensorTensorTuple max_pool1d_with_indices(
-    @Const @ByRef Tensor input,
-    @Const @ByRef MaxPool1dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_pool2d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer dilation,
-    @Cast("bool") boolean ceil_mode);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+// Targeting ../AvgPool2dImplBase.java
+
+// Targeting ../AvgPool3dImplBase.java
+
+// Targeting ../AvgPool1dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code AvgPool1dImpl}.
+ * See the documentation for {@code AvgPool1dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code AvgPool1d} with
+ * {@code torch::nn::AvgPool1dOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
+// Targeting ../AvgPool2dImpl.java
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool2d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::MaxPool2dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor max_pool2d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef MaxPool2dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal TensorTensorTuple max_pool2d_with_indices(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer dilation,
-    @Cast("bool") boolean ceil_mode);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+/** A {@code ModuleHolder} subclass for {@code AvgPool2dImpl}.
+ * See the documentation for {@code AvgPool2dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code AvgPool2d} with
+ * {@code torch::nn::AvgPool2dOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
+// Targeting ../AvgPool3dImpl.java
-/** See the documentation for {@code torch::nn::functional::MaxPool2dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(3).stride(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal TensorTensorTuple max_pool2d_with_indices(
-    @Const @ByRef Tensor input,
-    @Const @ByRef MaxPool2dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_pool3d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer padding,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer dilation,
-    @Cast("bool") boolean ceil_mode);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+/** A {@code ModuleHolder} subclass for {@code AvgPool3dImpl}.
+ * See the documentation for {@code AvgPool3dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code AvgPool3d} with
+ * {@code torch::nn::AvgPool3dOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
+// Targeting ../MaxPool1dImplBase.java
+
+// Targeting ../MaxPool2dImplBase.java
+
+// Targeting ../MaxPool3dImplBase.java
+
+// Targeting ../MaxPool1dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code MaxPool1dImpl}.
+ * See the documentation for {@code MaxPool1dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code MaxPool1d} with
+ * {@code torch::nn::MaxPool1dOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
+// Targeting ../MaxPool2dImpl.java
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_pool3d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::MaxPool3dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor max_pool3d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef MaxPool3dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal TensorTensorTuple max_pool3d_with_indices(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer padding,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer dilation,
-    @Cast("bool") boolean ceil_mode);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+/** A {@code ModuleHolder} subclass for {@code MaxPool2dImpl}.
+ * See the documentation for {@code MaxPool2dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code MaxPool2d} with
+ * {@code torch::nn::MaxPool2dOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
+// Targeting ../MaxPool3dImpl.java
-/** See the documentation for {@code torch::nn::functional::MaxPool3dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::max_pool3d_with_indices(x, F::MaxPool3dFuncOptions(3).stride(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal TensorTensorTuple max_pool3d_with_indices(
-    @Const @ByRef Tensor input,
-    @Const @ByRef MaxPool3dOptions options);
+/** A {@code ModuleHolder} subclass for {@code MaxPool3dImpl}.
+ * See the documentation for {@code MaxPool3dImpl} class to learn what methods it
+ * provides, and examples of how to use {@code MaxPool3d} with
+ * {@code torch::nn::MaxPool3dOptions}. See the documentation for {@code ModuleHolder} to
+ * learn about PyTorch's module storage semantics. */
+// Targeting ../AdaptiveMaxPool1dImplBase.java
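// A brief usage sketch for the max-pooling modules above. The ExpandingArray<2>
// kernel_size is assumed to map to a 2-element LongPointer, as in the functional
// signatures in this file; stride defaults to the kernel size in torch::nn.
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.MaxPool2dImpl;
import org.bytedeco.pytorch.MaxPool2dOptions;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class MaxPool2dSketch {
    public static void main(String[] args) {
        MaxPool2dImpl pool = new MaxPool2dImpl(new MaxPool2dOptions(new LongPointer(3, 3)));
        Tensor y = pool.forward(randn(1, 1, 9, 9)); // 3x3 windows, stride 3 -> (1, 1, 3, 3)
        y.print();
    }
}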
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal TensorTensorTuple adaptive_max_pool1d_with_indices(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size);
- // namespace detail
-/** See the documentation for
- * {@code torch::nn::functional::AdaptiveMaxPool1dFuncOptions} class to learn what
- * optional arguments are supported for this functional.
- *
- * Example:
- * <pre>{@code
- *  namespace F = torch::nn::functional;
- *  F::adaptive_max_pool1d_with_indices(x, F::AdaptiveMaxPool1dFuncOptions(3));
- *  }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal TensorTensorTuple adaptive_max_pool1d_with_indices(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AdaptiveMaxPool1dOptions options);
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_max_pool1d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+// Targeting ../AdaptiveMaxPool2dImplBase.java
+
+// Targeting ../AdaptiveMaxPool3dImplBase.java
+
+// Targeting ../AdaptiveMaxPool1dImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code AdaptiveMaxPool1dImpl}.
+ * See the documentation for {@code AdaptiveMaxPool1dImpl} class to learn what
+ * methods it provides, and examples of how to use {@code AdaptiveMaxPool1d} with
+ * {@code torch::nn::AdaptiveMaxPool1dOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../AdaptiveMaxPool2dImpl.java
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool1d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::AdaptiveMaxPool1dFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_max_pool1d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AdaptiveMaxPool1dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal TensorTensorTuple adaptive_max_pool2d_with_indices(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See the documentation for
-/** {@code torch::nn::functional::AdaptiveMaxPool2dFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::adaptive_max_pool2d_with_indices(x, F::AdaptiveMaxPool2dFuncOptions(3));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal TensorTensorTuple adaptive_max_pool2d_with_indices(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AdaptiveMaxPool2dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_max_pool2d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+/** A {@code ModuleHolder} subclass for {@code AdaptiveMaxPool2dImpl}.
+ * See the documentation for {@code AdaptiveMaxPool2dImpl} class to learn what
+ * methods it provides, and examples of how to use {@code AdaptiveMaxPool2d} with
+ * {@code torch::nn::AdaptiveMaxPool2dOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../AdaptiveMaxPool3dImpl.java
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool2d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::AdaptiveMaxPool2dFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_max_pool2d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AdaptiveMaxPool2dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal TensorTensorTuple adaptive_max_pool3d_with_indices(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See the documentation for
-/** {@code torch::nn::functional::AdaptiveMaxPool3dFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::adaptive_max_pool3d_with_indices(x, F::AdaptiveMaxPool3dFuncOptions(3));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal TensorTensorTuple adaptive_max_pool3d_with_indices(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AdaptiveMaxPool3dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_max_pool3d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_max_pool3d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::AdaptiveMaxPool3dFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_max_pool3d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AdaptiveMaxPool3dOptions options);
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_avg_pool1d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool1d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::AdaptiveAvgPool1dFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_avg_pool1d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AdaptiveAvgPool1dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_avg_pool2d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<2>*") LongOptional output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool2d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::AdaptiveAvgPool2dFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_avg_pool2d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AdaptiveAvgPool2dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor adaptive_avg_pool3d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArrayWithOptionalElem<3>*") LongOptional output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.adaptive_avg_pool3d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for
-/** {@code torch::nn::functional::AdaptiveAvgPool3dFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor adaptive_avg_pool3d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef AdaptiveAvgPool3dOptions options);
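// A brief usage sketch for the adaptive pooling modules above. The
// ExpandingArray<1> output_size is assumed to map to a 1-element LongPointer;
// the array-valued LongPointer constructor is used to avoid the
// capacity-allocating single-long one.
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.AdaptiveMaxPool1dImpl;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class AdaptiveMaxPool1dSketch {
    public static void main(String[] args) {
        AdaptiveMaxPool1dImpl pool = new AdaptiveMaxPool1dImpl(new LongPointer(new long[] {5}));
        Tensor y = pool.forward(randn(2, 3, 32)); // any input length -> (2, 3, 5)
        y.print();
    }
}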
-// ============================================================================
-@Namespace("torch::nn::functional") public static native @ByVal @Cast("std::vector*") LongVector _unpool_output_size(
-    @Const @ByRef Tensor input,
-    @ByRef @Cast("c10::ArrayRef*") LongArrayRef kernel_size,
-    @ByRef @Cast("c10::ArrayRef*") LongArrayRef stride,
-    @ByRef @Cast("c10::ArrayRef*") LongArrayRef padding,
-    @Const @ByRef LongVectorOptional output_size);
-@Namespace("torch::nn::functional") public static native @ByVal @Cast("std::vector*") LongVector _unpool_output_size(
-    @Const @ByRef Tensor input,
-    @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] kernel_size,
-    @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] stride,
-    @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] padding,
-    @Const @ByRef LongVectorOptional output_size);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_unpool1d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor indices,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer padding,
-    @Const @ByRef LongVectorOptional output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool1d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::MaxUnpool1dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::max_unpool1d(x, indices,
-/** F::MaxUnpool1dFuncOptions(3).stride(2).padding(1));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor max_unpool1d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor indices,
-    @Const @ByRef MaxUnpool1dFuncOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_unpool2d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor indices,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer padding,
-    @Const @ByRef LongVectorOptional output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool2d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::MaxUnpool2dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::max_unpool2d(x, indices,
-/** F::MaxUnpool2dFuncOptions(3).stride(2).padding(1));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor max_unpool2d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor indices,
-    @Const @ByRef MaxUnpool2dFuncOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor max_unpool3d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor indices,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer stride,
-    @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer padding,
-    @Const @ByRef LongVectorOptional output_size);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See
-/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.max_unpool3d
-/** about the exact behavior of this functional.
-/**
-/** See the documentation for {@code torch::nn::functional::MaxUnpool3dFuncOptions}
-/** class to learn what optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal Tensor max_unpool3d(
-    @Const @ByRef Tensor input,
-    @Const @ByRef Tensor indices,
-    @Const @ByRef MaxUnpool3dFuncOptions options);
-// ============================================================================
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal TensorTensorTuple fractional_max_pool2d_with_indices(
-    @Const @ByRef Tensor input,
-    @Cast("const torch::ExpandingArray<2>*") @ByRef LongPointer kernel_size,
-    @Cast("const c10::optional >*") @ByRef LongExpandingArrayOptional output_size,
-    @Cast("const c10::optional >*") @ByRef DoubleExpandingArrayOptional output_ratio,
-    @Const @ByRef Tensor _random_samples);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See the documentation for
-/** {@code torch::nn::functional::FractionalMaxPool2dFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::fractional_max_pool2d_with_indices(x,
-/** F::FractionalMaxPool2dFuncOptions(3).output_size(2));
-/** }</pre> */
-@Namespace("torch::nn::functional") public static native @ByVal TensorTensorTuple fractional_max_pool2d_with_indices(
-    @Const @ByRef Tensor input,
-    @Const @ByRef FractionalMaxPool2dOptions options);
-// #ifndef DOXYGEN_SHOULD_SKIP_THIS
-@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor fractional_max_pool2d(
-    @Const @ByRef Tensor input,
-    @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size,
-    @ByVal @Cast("c10::optional >*") LongExpandingArrayOptional output_size,
-    @ByVal @Cast("c10::optional >*") DoubleExpandingArrayOptional output_ratio,
-    @Const @ByRef Tensor _random_samples);
- // namespace detail
-// #endif /* DOXYGEN_SHOULD_SKIP_THIS */
-/** See the documentation for
-/** {@code torch::nn::functional::FractionalMaxPool2dFuncOptions} class to learn what
-/** optional arguments are supported for this functional.
-/**
-/** Example:
-/** <pre>{@code
-/** namespace F = torch::nn::functional;
-/** F::fractional_max_pool2d(x,
-/** F::FractionalMaxPool2dFuncOptions(3).output_size(2));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor fractional_max_pool2d( - @Const @ByRef Tensor input, - @Const @ByRef FractionalMaxPool2dOptions options); -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal TensorTensorTuple fractional_max_pool3d_with_indices( - @Const @ByRef Tensor input, - @Cast("const torch::ExpandingArray<3>*") @ByRef LongPointer kernel_size, - @Cast("const c10::optional >*") @ByRef LongExpandingArrayOptional output_size, - @Cast("const c10::optional >*") @ByRef DoubleExpandingArrayOptional output_ratio, - @Const @ByRef Tensor _random_samples); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +/** A {@code ModuleHolder} subclass for {@code AdaptiveMaxPool2dImpl}. + * See the documentation for {@code AdaptiveMaxPool2dImpl} class to learn what + * methods it provides, and examples of how to use {@code AdaptiveMaxPool2d} with + * {@code torch::nn::AdaptiveMaxPool2dOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../AdaptiveMaxPool3dImpl.java -/** See the documentation for -/** {@code torch::nn::functional::FractionalMaxPool3dFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::fractional_max_pool3d_with_indices(x,
-/** F::FractionalMaxPool3dFuncOptions(3).output_size(2));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal TensorTensorTuple fractional_max_pool3d_with_indices( - @Const @ByRef Tensor input, - @Const @ByRef FractionalMaxPool3dOptions options); -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor fractional_max_pool3d( - @Const @ByRef Tensor input, - @ByVal @Cast("torch::ExpandingArray<3>*") LongPointer kernel_size, - @ByVal @Cast("c10::optional >*") LongExpandingArrayOptional output_size, - @ByVal @Cast("c10::optional >*") DoubleExpandingArrayOptional output_ratio, - @Const @ByRef Tensor _random_samples); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See the documentation for -/** {@code torch::nn::functional::FractionalMaxPool3dFuncOptions} class to learn what -/** optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::fractional_max_pool3d(x,
-/** F::FractionalMaxPool3dFuncOptions(3).output_size(2));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor fractional_max_pool3d( - @Const @ByRef Tensor input, - @Const @ByRef FractionalMaxPool3dOptions options); +/** A {@code ModuleHolder} subclass for {@code AdaptiveMaxPool3dImpl}. + * See the documentation for {@code AdaptiveMaxPool3dImpl} class to learn what + * methods it provides, and examples of how to use {@code AdaptiveMaxPool3d} with + * {@code torch::nn::AdaptiveMaxPool3dOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../AdaptiveAvgPool1dImplBase.java -// ============================================================================ -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor lp_pool1d( - @Const @ByRef Tensor input, - double norm_type, - @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer kernel_size, - @ByVal @Cast("torch::ExpandingArray<1>*") LongPointer stride, - @Cast("bool") boolean ceil_mode); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// Targeting ../AdaptiveAvgPool2dImplBase.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool1d -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::LPPool1dFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor lp_pool1d( - @Const @ByRef Tensor input, - @Const @ByRef LPPool1dOptions options); -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor lp_pool2d( - @Const @ByRef Tensor input, - double norm_type, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer kernel_size, - @ByVal @Cast("torch::ExpandingArray<2>*") LongPointer stride, - @Cast("bool") boolean ceil_mode); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ +// Targeting ../AdaptiveAvgPool3dImplBase.java -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.lp_pool2d -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::LPPool2dFuncOptions} class -/** to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor lp_pool2d( - @Const @ByRef Tensor input, - @Const @ByRef LPPool2dOptions options); - // namespace functional - // namespace nn - // namespace torch +// Targeting ../AdaptiveAvgPool1dImpl.java -// Parsed from torch/nn/functional/upsampling.h -// #pragma once +/** A {@code ModuleHolder} subclass for {@code AdaptiveAvgPool1dImpl}. + * See the documentation for {@code AdaptiveAvgPool1dImpl} class to learn what + * methods it provides, and examples of how to use {@code AdaptiveAvgPool1d} with + * {@code torch::nn::AdaptiveAvgPool1dOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../AdaptiveAvgPool2dImpl.java -// #include -// #include -// #include -// #include -// #include -@Namespace("torch::nn::functional") public static native @ByVal @Cast("std::vector*") LongVector _interp_output_size( - @Cast("int64_t") long dim, - @ByVal @Cast("std::tuple >,c10::optional >,c10::optional >*") Pointer closed_over_args); +/** A {@code ModuleHolder} subclass for {@code AdaptiveAvgPool2dImpl}. + * See the documentation for {@code AdaptiveAvgPool2dImpl} class to learn what + * methods it provides, and examples of how to use {@code AdaptiveAvgPool2d} with + * {@code torch::nn::AdaptiveAvgPool2dOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../AdaptiveAvgPool3dImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor interpolate( - @Const @ByRef Tensor input, - @Const @ByRef LongVectorOptional size, - @Const @ByRef DoubleVectorOptional scale_factor, - @ByVal interpolate_mode_t mode, - @ByVal BoolOptional align_corners, - @ByVal BoolOptional recompute_scale_factor, - @Cast("bool") boolean antialias); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.interpolate -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::InterpolateFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::interpolate(input,
-/** F::InterpolateFuncOptions().size({4}).mode(torch::kNearest));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor interpolate( - @Const @ByRef Tensor input, - @Const @ByRef(nullValue = "torch::nn::functional::InterpolateFuncOptions{}") InterpolateFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor interpolate( - @Const @ByRef Tensor input); - // namespace functional - // namespace nn - // namespace torch +/** A {@code ModuleHolder} subclass for {@code AdaptiveAvgPool3dImpl}. + * See the documentation for {@code AdaptiveAvgPool3dImpl} class to learn what + * methods it provides, and examples of how to use {@code AdaptiveAvgPool3d} with + * {@code torch::nn::AdaptiveAvgPool3dOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../MaxUnpool1dImplBase.java -// Parsed from torch/nn/functional/vision.h +// Targeting ../MaxUnpool2dImplBase.java -// #pragma once -// #include -// #include +// Targeting ../MaxUnpool3dImplBase.java -@Namespace("torch::nn::functional") public static native @ByVal Tensor affine_grid( - @Const @ByRef Tensor theta, - @ByRef @Cast("c10::ArrayRef*") LongArrayRef size, - @Cast("bool") boolean align_corners/*=false*/); -@Namespace("torch::nn::functional") public static native @ByVal Tensor affine_grid( - @Const @ByRef Tensor theta, - @ByRef @Cast("c10::ArrayRef*") LongArrayRef size); -@Namespace("torch::nn::functional") public static native @ByVal Tensor affine_grid( - @Const @ByRef Tensor theta, - @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] size, - @Cast("bool") boolean align_corners/*=false*/); -@Namespace("torch::nn::functional") public static native @ByVal Tensor affine_grid( - @Const @ByRef Tensor theta, - @ByRef @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... size); -// ============================================================================ +// Targeting ../MaxUnpool1dImpl.java -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor grid_sample( - @Const @ByRef Tensor input, - @Const @ByRef Tensor grid, - @ByVal grid_sample_mode_t mode, - @ByVal grid_sample_padding_mode_t padding_mode, - @ByVal BoolOptional align_corners); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.grid_sample -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::GridSampleFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::grid_sample(input, grid,
-/** F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor grid_sample( - @Const @ByRef Tensor input, - @Const @ByRef Tensor grid, - @Const @ByRef(nullValue = "torch::nn::functional::GridSampleFuncOptions{}") GridSampleFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor grid_sample( - @Const @ByRef Tensor input, - @Const @ByRef Tensor grid); - // namespace functional - // namespace nn - // namespace torch +/** A {@code ModuleHolder} subclass for {@code MaxUnpool1dImpl}. + * See the documentation for {@code MaxUnpool1dImpl} class to learn what methods it + * provides, and examples of how to use {@code MaxUnpool1d} with + * {@code torch::nn::MaxUnpool1dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../MaxUnpool2dImpl.java -// Parsed from torch/nn/functional/instancenorm.h -// #pragma once +/** A {@code ModuleHolder} subclass for {@code MaxUnpool2dImpl}. + * See the documentation for {@code MaxUnpool2dImpl} class to learn what methods it + * provides, and examples of how to use {@code MaxUnpool2d} with + * {@code torch::nn::MaxUnpool2dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../MaxUnpool3dImpl.java -// #include -// #ifndef DOXYGEN_SHOULD_SKIP_THIS -@Namespace("torch::nn::functional::detail") public static native @ByVal Tensor instance_norm( - @Const @ByRef Tensor input, - @Const @ByRef Tensor running_mean, - @Const @ByRef Tensor running_var, - @Const @ByRef Tensor weight, - @Const @ByRef Tensor bias, - @Cast("bool") boolean use_input_stats, - double momentum, - double eps); - // namespace detail -// #endif /* DOXYGEN_SHOULD_SKIP_THIS */ -/** See -/** https://pytorch.org/docs/master/nn.functional.html#torch.nn.functional.instance_norm -/** about the exact behavior of this functional. -/** -/** See the documentation for {@code torch::nn::functional::InstanceNormFuncOptions} -/** class to learn what optional arguments are supported for this functional. -/** -/** Example: -/**
{@code
-/** namespace F = torch::nn::functional;
-/** F::instance_norm(input,
-/** F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5));
-/** }
*/ -@Namespace("torch::nn::functional") public static native @ByVal Tensor instance_norm( - @Const @ByRef Tensor input, - @Const @ByRef(nullValue = "torch::nn::functional::InstanceNormFuncOptions{}") InstanceNormFuncOptions options); -@Namespace("torch::nn::functional") public static native @ByVal Tensor instance_norm( - @Const @ByRef Tensor input); +/** A {@code ModuleHolder} subclass for {@code MaxUnpool3dImpl}. + * See the documentation for {@code MaxUnpool3dImpl} class to learn what methods it + * provides, and examples of how to use {@code MaxUnpool3d} with + * {@code torch::nn::MaxUnpool3dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../FractionalMaxPool2dImpl.java - // namespace functional - // namespace nn - // namespace torch -// Parsed from torch/nn/module.h +/** A {@code ModuleHolder} subclass for {@code FractionalMaxPool2dImpl}. + * See the documentation for {@code FractionalMaxPool2dImpl} class to learn what + * methods it provides, and examples of how to use {@code FractionalMaxPool2d} with + * {@code torch::nn::FractionalMaxPool2dOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../FractionalMaxPool3dImpl.java -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include +/** A {@code ModuleHolder} subclass for {@code FractionalMaxPool3dImpl}. + * See the documentation for {@code FractionalMaxPool3dImpl} class to learn what + * methods it provides, and examples of how to use {@code FractionalMaxPool3d} with + * {@code torch::nn::FractionalMaxPool3dOptions}. See the documentation for + * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */ +// Targeting ../LPPool1dImplBase.java -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../Module.java + +// Targeting ../LPPool2dImplBase.java -@Namespace("torch::nn") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef Module module); +// Targeting ../LPPool1dImpl.java -/** Serialize a {@code Module} pointer into an {@code OutputArchive}. */ -@Namespace("torch::nn") public static native @ByRef @Name("operator <<") OutputArchive shiftLeft( - @ByRef OutputArchive archive, - @SharedPtr @Cast({"", "std::shared_ptr"}) Module module); -/** Deserializes a {@code Module} from an {@code InputArchive}. */ -@Namespace("torch::nn") public static native @ByRef @Name("operator >>") InputArchive shiftRight( - @ByRef InputArchive archive, - @SharedPtr @Cast({"", "std::shared_ptr"}) Module module); +/** A {@code ModuleHolder} subclass for {@code LPPool1dImpl}. + * See the documentation for {@code LPPool1dImpl} class to learn what methods it + * provides, and examples of how to use {@code LPPool1d} with + * {@code torch::nn::LPPool1dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../LPPool2dImpl.java + + + +/** A {@code ModuleHolder} subclass for {@code LPPool2dImpl}. + * See the documentation for {@code LPPool2dImpl} class to learn what methods it + * provides, and examples of how to use {@code LPPool2d} with + * {@code torch::nn::LPPool2dOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. 
 */
+
+ // namespace nn
+ // namespace torch
+
+
+// Parsed from torch/nn/options/rnn.h
+
+// #pragma once
+
+// #include
+// #include
+// #include
+// #include
+// Targeting ../RNNOptionsBase.java


-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nn::Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

+// Targeting ../RNNOptions.java
+
+// Targeting ../LSTMOptions.java
+
+// Targeting ../GRUOptions.java
+
+// Targeting ../RNNCellOptionsBase.java
+
+// Targeting ../RNNCellOptions.java
+
+// Targeting ../LSTMCellOptions.java
+
+// Targeting ../GRUCellOptions.java
@@ -83197,237 +72418,289 @@ The list of (type, depth) pairs controls the type of specializations and the num
 // namespace torch


-// Parsed from torch/nn/modules.h
+// Parsed from torch/nn/utils/rnn.h

// #pragma once

-// Common
-// #include
+// #include
+// #include

-// Containers
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
+// #include

-// Layers
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
+
+///
+///
+///
+///
+///
+///
+///
+@Namespace("torch::nn::utils::rnn") public static native @ByVal Tensor invert_permutation(@Const @ByRef Tensor permutation);
+// Targeting ../PackedSequence.java


-// Parsed from torch/nn/modules/common.h
+/** Packs a Tensor containing padded sequences of variable length.
+ *
+ *  {@code input} can be of size {@code T x B x *} where {@code T} is the length of the
+ *  longest sequence (equal to {@code lengths[0]}), {@code B} is the batch size, and
+ *  {@code *} is any number of dimensions (including 0). If {@code batch_first} is
+ *  {@code true}, a {@code B x T x *} {@code input} is expected.
+ *
+ *  For unsorted sequences, use {@code enforce_sorted = false}. If {@code enforce_sorted} is
+ *  {@code true}, the sequences should be sorted by length in a decreasing order, i.e.
+ *  {@code input[:,0]} should be the longest sequence, and {@code input[:,B-1]} the
+ *  shortest one.
+ *
+ *  Note:
+ *      This function accepts any input that has at least two dimensions. You
+ *      can apply it to pack the labels, and use the output of the RNN with
+ *      them to compute the loss directly. A Tensor can be retrieved from
+ *      a {@code PackedSequence} object by calling its {@code .data()} function.
+ *
+ *  Arguments:
+ *      input (Tensor): padded batch of variable length sequences.
+ *      lengths (Tensor): list of sequence lengths of each batch element.
+ *      batch_first (bool, optional): if {@code true}, the input is expected in
+ *          {@code B x T x *} format. Default: {@code false}.
+ *      enforce_sorted (bool, optional): if {@code true}, the input is expected to
+ *          contain sequences sorted by length in a decreasing order. If
+ *          {@code false}, this condition is not checked. Default: {@code true}.
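+ *
+ *  Example (an editor's sketch of the equivalent call through these Java
+ *  bindings, not part of the upstream docs; it assumes varargs {@code randn} and
+ *  {@code ones} overloads, a {@code Tensor.to(ScalarType)} overload, and the
+ *  {@code get0()}/{@code get1()} tuple accessors, all believed to be generated
+ *  by these presets):
+ *  <pre>{@code
+ *  import org.bytedeco.pytorch.*;
+ *  import static org.bytedeco.pytorch.global.torch.*;
+ *
+ *  Tensor input = randn(1, 2, 3);                 // T x B x *, with T = 1, B = 2
+ *  Tensor lengths = ones(2).to(ScalarType.Long);  // both sequences have length 1
+ *  PackedSequence packed = pack_padded_sequence(input, lengths,
+ *      false, true);                              // batch_first = false, enforce_sorted = true
+ *  T_TensorTensor_T unpacked = pad_packed_sequence(packed);  // inverse operation
+ *  Tensor padded = unpacked.get0();               // padded data
+ *  Tensor outLengths = unpacked.get1();           // recovered lengths
+ *  }</pre>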
+ *
+ *  Returns:
+ *      a {@code PackedSequence} object */

+///
+///
+///
+///
+///
+@Namespace("torch::nn::utils::rnn") public static native @ByVal PackedSequence pack_padded_sequence(
+    @ByVal Tensor input,
+    @ByVal Tensor lengths,
+    @Cast("bool") boolean batch_first/*=false*/,
+    @Cast("bool") boolean enforce_sorted/*=true*/);
+@Namespace("torch::nn::utils::rnn") public static native @ByVal PackedSequence pack_padded_sequence(
+    @ByVal Tensor input,
+    @ByVal Tensor lengths);

+/** Pads a packed batch of variable length sequences.
+ *
+ *  It is an inverse operation to {@code pack_padded_sequence}.
+ *
+ *  The returned Tensor's data will be of size {@code T x B x *}, where {@code T} is the
+ *  length of the longest sequence and {@code B} is the batch size. If {@code batch_first}
+ *  is true, the data will be transposed into {@code B x T x *} format.
+ *
+ *  Batch elements will be ordered decreasingly by their length.
+ *
+ *  Arguments:
+ *      sequence (PackedSequence): batch to pad
+ *      batch_first (bool, optional): if {@code true}, the output will be in
+ *          {@code B x T x *} format.
+ *      padding_value (double, optional): values for padded elements.
+ *      total_length (int64_t, optional): if specified, the output will be padded to
+ *          have length {@code total_length}. This method will throw an error
+ *          if {@code total_length} is less than the max sequence length in
+ *          {@code sequence}.
+ *
+ *  Returns:
+ *      Tuple of Tensor containing the padded sequence, and a Tensor
+ *      containing the list of lengths of each sequence in the batch. */

///
///
///
///
///
+@Namespace("torch::nn::utils::rnn") public static native @ByVal T_TensorTensor_T pad_packed_sequence(
+    @ByVal PackedSequence sequence,
+    @Cast("bool") boolean batch_first/*=false*/,
+    double padding_value/*=0.0*/,
+    @ByVal(nullValue = "c10::optional(torch::nullopt)") LongOptional total_length);
+@Namespace("torch::nn::utils::rnn") public static native @ByVal T_TensorTensor_T pad_packed_sequence(
+    @ByVal PackedSequence sequence);

-/** This macro enables a module with default arguments in its forward method
- * to be used in a Sequential module.
+/** Pads a list of variable length Tensors with {@code padding_value}.
 *
- * Example usage:
+ *  {@code pad_sequence} stacks a list of Tensors along a new dimension,
+ *  and pads them to equal length. For example, given a list of
+ *  sequences of size {@code L x *}, the output is of size {@code T x B x *} if
+ *  batch_first is false, and {@code B x T x *} otherwise.
 *
- * Let's say we have a module declared like this:
- * <pre>
{@code
- *  struct MImpl : torch::nn::Module {
- *   public:
- *    explicit MImpl(int value_) : value(value_) {}
- *    torch::Tensor forward(int a, int b = 2, double c = 3.0) {
- *      return torch::tensor(a + b + c);
- *    }
- *   private:
- *    int value;
- *  };
- *  TORCH_MODULE(M);
- *  }
+ *  {@code B} is batch size. It is equal to the number of elements in {@code sequences}.
+ *  {@code T} is length of the longest sequence.
+ *  {@code L} is length of the sequence.
+ *  {@code *} is any number of trailing dimensions, including none.
 *
- * If we try to use it in a Sequential module and run forward:
- * <pre>
{@code
- *  torch::nn::Sequential seq(M(1));
- *  seq->forward(1);
- *  }
+ *  Note:
+ *      This function returns a Tensor of size {@code T x B x *} or {@code B x T x *}
+ *      where {@code T} is the length of the longest sequence. This function assumes
+ *      trailing dimensions and type of all the Tensors in sequences are same.
 *
- * We will receive the following error message:
- * <pre>
{@code
- *  MImpl's forward() method expects 3 argument(s), but received 1.
- *  If MImpl's forward() method has default arguments, please make sure
- *  the forward() method is declared with a corresponding
- *  `FORWARD_HAS_DEFAULT_ARGS` macro.
- *  }
+ *  Arguments:
+ *      sequences (torch::ArrayRef): list of variable length sequences.
+ *      batch_first (bool, optional): output will be in {@code B x T x *} if true,
+ *          or in {@code T x B x *} otherwise
+ *      padding_value (double, optional): value for padded elements. Default: 0.
+ *
+ *  Returns:
+ *      Tensor of size {@code T x B x *} if {@code batch_first} is {@code false}.
+ *      Tensor of size {@code B x T x *} otherwise */
+
+/** Packs a list of variable length Tensors
+ *
+ *  {@code sequences} should be a list of Tensors of size {@code L x *}, where {@code L} is
+ *  the length of a sequence and {@code *} is any number of trailing dimensions,
+ *  including zero.
+ *
+ *  For unsorted sequences, use {@code enforce_sorted = false}. If {@code enforce_sorted}
+ *  is {@code true}, the sequences should be sorted in the order of decreasing length.
 *
- * The right way to fix this error is to use the {@code FORWARD_HAS_DEFAULT_ARGS}
- * macro when declaring the module:
- * <pre>
{@code
- *  struct MImpl : torch::nn::Module {
- *   public:
- *    explicit MImpl(int value_) : value(value_) {}
- *    torch::Tensor forward(int a, int b = 2, double c = 3.0) {
- *      return torch::tensor(a + b + c);
- *    }
- *   protected:
- *    /*
- *    NOTE: looking at the argument list of `forward`:
- *    `forward(int a, int b = 2, double c = 3.0)`
- *    we saw the following default arguments:
- *    ----------------------------------------------------------------
- *    0-based index of default |         Default value of arg
- *    arg in forward arg list  |  (wrapped by `torch::nn::AnyValue()`)
- *    ----------------------------------------------------------------
- *                1            |       torch::nn::AnyValue(2)
- *                2            |       torch::nn::AnyValue(3.0)
- *    ----------------------------------------------------------------
- *    Thus we pass the following arguments to the `FORWARD_HAS_DEFAULT_ARGS`
- *    macro:
- *    * /
- *    FORWARD_HAS_DEFAULT_ARGS({1, torch::nn::AnyValue(2)}, {2,
- *    torch::nn::AnyValue(3.0)})
- *   private:
- *    int value;
- *  };
- *  TORCH_MODULE(M);
- *  }
- * Now, running the following would work: - *
{@code
- *  torch::nn::Sequential seq(M(1));
- *  seq->forward(1);  // This correctly populates the default arguments for
- *  `MImpl::forward`
- *  }
 */
-// #define FORWARD_HAS_DEFAULT_ARGS(...)
-// template
-// friend struct torch::nn::AnyModuleHolder;
-// bool _forward_has_default_args() override {
-// return true;
-// }
-// unsigned int _forward_num_required_args() override {
-// std::pair args_info[] = {__VA_ARGS__};
-// return args_info[0].first;
-// }
-// std::vector _forward_populate_default_args(
-// std::vector&& arguments) override {
-// std::pair args_info[] = {__VA_ARGS__};
-// unsigned int num_all_args = std::rbegin(args_info)->first + 1;
-// TORCH_INTERNAL_ASSERT(
-// arguments.size() >= _forward_num_required_args() &&
-// arguments.size() <= num_all_args);
-// std::vector ret = std::move(arguments);
-// ret.reserve(num_all_args);
-// for (auto& arg_info : args_info) {
-// if (arg_info.first > ret.size() - 1)
-// ret.emplace_back(std::move(arg_info.second));
-// }
-// return ret;
-// }
+ *
+ *  Arguments:
+ *      sequences (torch::ArrayRef): A list of sequences of decreasing length.
+ *      enforce_sorted (bool, optional): if {@code true}, checks that the input
+ *          contains sequences sorted by length in a decreasing order. If
+ *          {@code false}, this condition is not checked. Default: {@code true}.
+ *
+ *  Returns:
+ *      a {@code PackedSequence} object */
+@Namespace("torch::nn::utils::rnn") public static native @ByVal PackedSequence pack_sequence(
+    @ByVal TensorArrayRef sequences,
+    @Cast("bool") boolean enforce_sorted/*=true*/);
+@Namespace("torch::nn::utils::rnn") public static native @ByVal PackedSequence pack_sequence(
+    @ByVal TensorArrayRef sequences);
+
+ // namespace rnn
+ // namespace utils
+ // namespace nn
+ // namespace torch


-// Parsed from torch/nn/modules/container/any.h
+// Parsed from torch/nn/modules/rnn.h

// #pragma once

-// #include
-// #include
-// #include
-// #include
+// #include
+// #include
+// #include
+// #include
// #include
+// #include
// #include
-// #include
-// #include
-// #include
-
-// #include
+// #include
+// #include
+// #include
+// #include
// #include
-// #include
-// #include
-// #include
// #include
-// Targeting ../AnyModule.java
-
-
-
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModule ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-
-
-
-
+// Targeting ../RNNImplBase.java
+
+
+// Targeting ../LSTMImplBase.java
+
+
+// Targeting ../GRUImplBase.java
+
+
+// Targeting ../RNNImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code RNNImpl}.
+ * See the documentation for {@code RNNImpl} class to learn what methods it
+ * provides, and examples of how to use {@code RNN} with {@code torch::nn::RNNOptions}.
+ * See the documentation for {@code ModuleHolder} to learn about PyTorch's
+ * module storage semantics. */
+// Targeting ../LSTMImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code LSTMImpl}.
+ * See the documentation for {@code LSTMImpl} class to learn what methods it
+ * provides, and examples of how to use {@code LSTM} with {@code torch::nn::LSTMOptions}.
+ * See the documentation for {@code ModuleHolder} to learn about PyTorch's
+ * module storage semantics. */
+// Targeting ../GRUImpl.java
+
+
+/** A {@code ModuleHolder} subclass for {@code GRUImpl}.
+ * See the documentation for {@code GRUImpl} class to learn what methods it
+ * provides, and examples of how to use {@code GRU} with {@code torch::nn::GRUOptions}.
+ * See the documentation for {@code ModuleHolder} to learn about PyTorch's
+ * module storage semantics.
*/ +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RNNCellImplBase +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Targeting ../RNNCellImplBase.java +// Targeting ../LSTMCellImplBase.java +// Targeting ../GRUCellImplBase.java +// Targeting ../RNNCellImpl.java -// Private Methods +/** A {@code ModuleHolder} subclass for {@code RNNCellImpl}. + * See the documentation for {@code RNNCellImpl} class to learn what methods it + * provides, and examples of how to use {@code RNNCell} with + * {@code torch::nn::RNNCellOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../LSTMCellImpl.java +/** A {@code ModuleHolder} subclass for {@code LSTMCellImpl}. + * See the documentation for {@code LSTMCellImpl} class to learn what methods it + * provides, and examples of how to use {@code LSTMCell} with + * {@code torch::nn::LSTMCellOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ +// Targeting ../GRUCellImpl.java +/** A {@code ModuleHolder} subclass for {@code GRUCellImpl}. + * See the documentation for {@code GRUCellImpl} class to learn what methods it + * provides, and examples of how to use {@code GRUCell} with + * {@code torch::nn::GRUCellOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ // namespace nn // namespace torch -// Parsed from torch/nn/modules/container/moduledict.h +// Parsed from torch/nn/options/transformerlayer.h // #pragma once -// #include -// #include -// #include -// #include -// Targeting ../ModuleDictImpl.java +// #include +// #include +// #include +// #include -// Targeting ../ModuleDict.java +/// +// Targeting ../TransformerEncoderLayerOptions.java + + +// Targeting ../TransformerDecoderLayerOptions.java @@ -83435,20 +72708,18 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/nn/modules/container/modulelist.h +// Parsed from torch/nn/options/transformer.h // #pragma once -// #include -// #include -// #include - -// #include -// #include -// Targeting ../ModuleListImpl.java - +// #include +// #include +// #include +// #include -// Targeting ../ModuleList.java +// #include +// #include +// Targeting ../TransformerOptions.java @@ -83456,1798 +72727,3439 @@ The list of (type, depth) pairs controls the type of specializations and the num // namespace torch -// Parsed from torch/nn/modules/container/named_any.h +// Parsed from torch/nn/modules/transformer.h // #pragma once -// #include +// #include // #include -// #include +// #include +// #include // #include -// #include -// #include -// #include -// #include +// #include -// #include +// #include +// Targeting ../TransformerImpl.java -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../NamedAnyModule.java +/** A {@code ModuleHolder} subclass for {@code TransformerImpl}. + * See the documentation for {@code TransformerImpl} class to learn what + * methods it provides, and examples of how to use {@code Transformer} with + * {@code torch::nn::TransformerOptions}. + * See the documentation for {@code ModuleHolder} to learn about PyTorch's + * module storage semantics. 
 */

 // namespace nn
 // namespace torch


-// Parsed from torch/nn/modules/container/sequential.h
+// Parsed from torch/nn/modules/transformerlayer.h

// #pragma once

-// #include
 // #include
 // #include
-// #include
-// #include
+// #include
+// #include
+// #include
+// #include
+// #include
+// #include
 // #include
-// #include
-// #include
+// #include

-// #include
-// #include
 // #include
-// #include
-// #include
-// #include
-// #include
-// Targeting ../SequentialImpl.java
-
-
-// Targeting ../Sequential.java
-
-
- // namespace nn
- // namespace torch
-
-
-// Parsed from torch/nn/modules/container/parameterdict.h
-
-// #pragma once
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// Targeting ../ParameterDictImpl.java
+// Targeting ../TransformerEncoderLayerImpl.java


-// Targeting ../ParameterDict.java
+/** A {@code ModuleHolder} subclass for {@code TransformerEncoderLayerImpl}.
+ * See the documentation for {@code TransformerEncoderLayerImpl} class to learn what
+ * methods it provides, and examples of how to use {@code TransformerEncoderLayer}
+ * with {@code torch::nn::TransformerEncoderLayerOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */
+// Targeting ../TransformerDecoderLayerImpl.java


+/** A {@code ModuleHolder} subclass for {@code TransformerDecoderLayerImpl}.
+ * See the documentation for {@code TransformerDecoderLayerImpl} class to learn what
+ * methods it provides, and examples of how to use {@code TransformerDecoderLayer}
+ * with {@code torch::nn::TransformerDecoderLayerOptions}. See the documentation for
+ * {@code ModuleHolder} to learn about PyTorch's module storage semantics. */

 // namespace nn
 // namespace torch


-// Parsed from torch/nn/modules/container/parameterlist.h
+// Parsed from torch/nn/options/transformercoder.h

// #pragma once

-// #include
-// #include
+// #include
+// #include
+// #include
+// #include
+
+// #include
+// #include
+// Targeting ../TransformerEncoderOptions.java


-// #include
-// Targeting ../ParameterListImpl.java
+// Targeting ../TransformerDecoderOptions.java


-// Targeting ../ParameterList.java

 // namespace nn
 // namespace torch


-// Parsed from torch/nn/modules/adaptive.h
+// Parsed from torch/nn/modules/transformercoder.h

// #pragma once

 // #include
-// #include
 // #include
+// #include
+// #include
 // #include
-// #include
-// #include
-// #include
-// Targeting ../ASMoutput.java
+// #include
+// #include
+// #include

-// Targeting ../AdaptiveLogSoftmaxWithLossImpl.java
+// #include
+// Targeting ../TransformerEncoderImpl.java


-// Targeting ../AdaptiveLogSoftmaxWithLoss.java
+
+/** A {@code ModuleHolder} subclass for {@code TransformerEncoderImpl}.
+ * See the documentation for {@code TransformerEncoderImpl} class to learn what
+ * methods it provides, and examples of how to use {@code TransformerEncoder} with
+ * {@code torch::nn::TransformerEncoderOptions}.
+ * See the documentation for {@code ModuleHolder} to learn about PyTorch's
+ * module storage semantics. */
+// Targeting ../TransformerDecoderImpl.java


+/** A {@code ModuleHolder} subclass for {@code TransformerDecoderImpl}.
+ * See the documentation for {@code TransformerDecoderImpl} class to learn what
+ * methods it provides, and examples of how to use {@code TransformerDecoder} with
+ * {@code torch::nn::TransformerDecoderOptions}.
+ * See the documentation for {@code ModuleHolder} to learn about PyTorch's
+ * module storage semantics.
*/ + // namespace nn // namespace torch -// Parsed from torch/nn/modules/batchnorm.h +// Parsed from torch/nn/modules/upsampling.h // #pragma once // #include -// #include -// #include -// #include +// #include +// #include // #include // #include -// #include -// Targeting ../BatchNorm1dImplBaseBase.java - - -// Targeting ../InstanceNorm1dImplBaseBase.java - - -// Targeting ../BatchNorm2dImplBaseBase.java - - -// Targeting ../InstanceNorm2dImplBaseBase.java - - -// Targeting ../BatchNorm3dImplBaseBase.java - - -// Targeting ../InstanceNorm3dImplBaseBase.java - - -// Targeting ../BatchNorm1dImplBase.java - - -// Targeting ../BatchNorm2dImplBase.java - - -// Targeting ../BatchNorm3dImplBase.java - - -// Targeting ../BatchNorm1dImpl.java - - -// Targeting ../BatchNorm1d.java - - -// Targeting ../BatchNorm2dImpl.java - - -// Targeting ../BatchNorm2d.java - - -// Targeting ../BatchNorm3dImpl.java +// #include +// #include +// #include +// Targeting ../UpsampleImpl.java -// Targeting ../BatchNorm3d.java +/** A {@code ModuleHolder} subclass for {@code UpsampleImpl}. + * See the documentation for {@code UpsampleImpl} class to learn what methods it + * provides, and examples of how to use {@code Upsample} with + * {@code torch::nn::UpsampleOptions}. See the documentation for {@code ModuleHolder} to + * learn about PyTorch's module storage semantics. */ // namespace nn // namespace torch -// Parsed from torch/nn/modules/instancenorm.h +// Parsed from torch/nn/modules.h // #pragma once -// #include -// #include -// Targeting ../InstanceNorm1dImplBase.java - +// Common +// #include -// Targeting ../InstanceNorm2dImplBase.java +// Containers +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Layers +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Targeting ../InstanceNorm3dImplBase.java +// Parsed from torch/nn/options.h -// Targeting ../InstanceNorm1dImpl.java +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Targeting ../InstanceNorm1d.java +// Parsed from torch/nn/utils/clip_grad.h -// Targeting ../InstanceNorm2dImpl.java +// #pragma once +// #include -// Targeting ../InstanceNorm2d.java +// #include +// Clips gradient norm of a vector of Tensors. +// See +// https://pytorch.org/docs/stable/nn.html?highlight=clip_grad_norm#torch.nn.utils.clip_grad_norm_ +// for more details about this module. +// +// Difference with the python version: unlike the python version, even when +// skipping the finiteness checks (error_if_nonfinite = false), this function +// will introduce a device <=> CPU synchronization (for devices where that makes +// sense!) in order to return a CPU-side `double`. This C++ version therefore +// cannot be run fully asynchronously w.r.t. the device of the gradients. 
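+//
+// Editor's note, not from the upstream header: once a backward() pass has
+// populated gradients, the overloads declared below can be called from Java
+// as in the following sketch (it assumes TensorVector's varargs constructor
+// and a static import of org.bytedeco.pytorch.global.torch):
+//
+//   double totalNorm = clip_grad_norm_(new TensorVector(w1, w2), 1.0);  // max_norm = 1.0
+//   clip_grad_value_(new TensorVector(w1, w2), 0.5);  // clamp each gradient to [-0.5, 0.5]
+//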
+@Namespace("torch::nn::utils") public static native double clip_grad_norm_( + @Cast({"", "std::vector"}) @StdMove TensorVector parameters, + double max_norm, + double norm_type/*=2.0*/, + @Cast("bool") boolean error_if_nonfinite/*=false*/); +@Namespace("torch::nn::utils") public static native double clip_grad_norm_( + @Cast({"", "std::vector"}) @StdMove TensorVector parameters, + double max_norm); -// Targeting ../InstanceNorm3dImpl.java +// A wrapper around clip_grad_norm_ that allows us to call the function with a +// braced-init-list of Tensors. +// A wrapper around clip_grad_norm_ that allows us to call the function with a +// single Tensor. +@Namespace("torch::nn::utils") public static native double clip_grad_norm_( + @ByVal Tensor parameter, + double max_norm, + double norm_type/*=2.0*/, + @Cast("bool") boolean error_if_nonfinite/*=false*/); +@Namespace("torch::nn::utils") public static native double clip_grad_norm_( + @ByVal Tensor parameter, + double max_norm); -// Targeting ../InstanceNorm3d.java +// Clips gradient of an iterable of parameters at specified value. +// Gradients are modified in-place. +// See https://pytorch.org/docs/stable/nn.html#clip-grad-value +// for more details about this module. +@Namespace("torch::nn::utils") public static native void clip_grad_value_( + @Cast({"", "std::vector"}) @StdMove TensorVector parameters, + double clip_value); +// A wrapper around clip_grad_value_ that allows us to call the function with a +// braced-init-list of Tensors. +// A wrapper around clip_grad_value_ that allows us to call the function with a +// single Tensor. +@Namespace("torch::nn::utils") public static native void clip_grad_value_(@ByVal Tensor parameter, double clip_value); + // namespace utils // namespace nn // namespace torch -// Parsed from torch/nn/modules/conv.h +// Parsed from torch/nn/utils/convert_parameters.h // #pragma once -// #include -// #include - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - // #include +// #include -// #include -// #include -// Targeting ../Conv1dImplBase.java - - -// Targeting ../ConvTranspose1dImplBaseBase.java - - -// Targeting ../Conv2dImplBase.java - - -// Targeting ../ConvTranspose2dImplBaseBase.java - - -// Targeting ../Conv3dImplBase.java - - -// Targeting ../ConvTranspose3dImplBaseBase.java - - -// Targeting ../Conv1dImpl.java - - -// Targeting ../Conv1d.java - - -// Targeting ../Conv2dImpl.java - - -// Targeting ../Conv2d.java - - -// Targeting ../Conv3dImpl.java - - -// Targeting ../Conv3d.java - - -// Targeting ../ConvTranspose1dImplBase.java - +// This helper function is to check if the parameters are located +// in the same device. Currently, the conversion between model parameters +// and single vector form is not supported for multiple allocations, +// e.g. parameters in different GPUs, or mixture of CPU/GPU. 
+@Namespace("torch::nn::utils") public static native @ByVal LongOptional _check_param_device( + @Const @ByRef Tensor param, + @ByVal LongOptional old_param_device); -// Targeting ../ConvTranspose2dImplBase.java +// Convert parameters to one vector +@Namespace("torch::nn::utils") public static native @ByVal Tensor parameters_to_vector( + @Cast({"", "std::vector"}) @StdMove TensorVector parameters); +// Convert one vector to the parameters +@Namespace("torch::nn::utils") public static native void vector_to_parameters( + @Const @ByRef Tensor vec, + @Cast({"", "std::vector"}) @StdMove TensorVector parameters); -// Targeting ../ConvTranspose3dImplBase.java + // namespace utils + // namespace nn + // namespace torch -// Targeting ../ConvTranspose1dImpl.java +// Parsed from torch/nn/utils.h +// #pragma once -// Targeting ../ConvTranspose1d.java +// #include +// #include +// #include -// Targeting ../ConvTranspose2dImpl.java +// Parsed from torch/nn.h +// #pragma once -// Targeting ../ConvTranspose2d.java +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Targeting ../ConvTranspose3dImpl.java +// Parsed from torch/optim/optimizer.h +// #pragma once -// Targeting ../ConvTranspose3d.java +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include - // namespace nn - // namespace torch +// Forward declarations confuse Doxygen +// #ifndef DOXYGEN_SHOULD_SKIP_THIS + // namespace at + // namespace serialize +// Targeting ../OptimizerParamState.java -// Parsed from torch/nn/modules/dropout.h -// #pragma once +// Targeting ../OptimizerCloneableAdagradParamState.java -// #include -// #include -// #include -// #include -// #include +// Targeting ../OptimizerCloneableAdamParamState.java -// #include -// #include -// Targeting ../DropoutImplBase.java +// Targeting ../OptimizerCloneableAdamWParamState.java -// Targeting ../Dropout2dImplBase.java +// Targeting ../OptimizerCloneableLBFGSParamState.java -// Targeting ../Dropout3dImplBase.java +// Targeting ../OptimizerCloneableRMSpropParamState.java -// Targeting ../AlphaDropoutImplBase.java +// Targeting ../OptimizerCloneableSGDParamState.java -// Targeting ../FeatureAlphaDropoutImplBase.java +// Targeting ../OptimizerOptions.java +// Targeting ../OptimizerCloneableAdagradOptions.java -// Targeting ../DropoutImpl.java +// Targeting ../OptimizerCloneableAdamOptions.java -// Targeting ../Dropout.java +// Targeting ../OptimizerCloneableAdamWOptions.java -// Targeting ../Dropout2dImpl.java +// Targeting ../OptimizerCloneableLBFGSOptions.java -// Targeting ../Dropout2d.java +// Targeting ../OptimizerCloneableRMSpropOptions.java -// Targeting ../Dropout3dImpl.java +// Targeting ../OptimizerCloneableSGDOptions.java -// Targeting ../Dropout3d.java +// Targeting ../OptimizerParamGroup.java -// Targeting ../AlphaDropoutImpl.java +// Targeting ../Optimizer.java -// Targeting ../AlphaDropout.java -// Targeting ../FeatureAlphaDropoutImpl.java +/* How do we decide whether to serialize undefined tensors or + c10::nullopt values into the output archive? +Answer: we strictly follow the behavior of Python API. To be more specific: +For optimizer options: +a) For undefined tensor: currently no tensor is used as an options argument in +Python API, so we don't need to worry about it now. b) For c10::nullopt value: +we serialize c10::nullopt values into the output archive, to follow the exact +same behavior as Python API. 
-// Targeting ../FeatureAlphaDropout.java +For optimizer param state: +a) For undefined tensor: in param state, undefined tensor in C++ impl is +equivalent to missing key in Python impl. Since we don't serialize missing keys +in Python API, we skip undefined tensors when serializing the param state. b) +For c10::nullopt value: in param state, c10::nullopt value in C++ impl is +equivalent to missing key in Python impl. Since we don't serialize missing keys +in Python API, we skip c10::nullopt values when serializing the param state. */ +/** Serializes an {@code Optimizer} into an {@code OutputArchive}. */ +@Namespace("torch::optim") public static native @ByRef @Name("operator <<") OutputArchive shiftLeft( + @ByRef OutputArchive archive, + @Const @ByRef Optimizer optimizer); +/** Deserializes a {@code Tensor} from an {@code InputArchive}. */ +@Namespace("torch::optim") public static native @ByRef @Name("operator >>") InputArchive shiftRight( + @ByRef InputArchive archive, + @ByRef Optimizer optimizer); - // namespace nn + // namespace optim // namespace torch -// Parsed from torch/nn/modules/distance.h +// Parsed from torch/optim/serialize.h // #pragma once -// #include -// #include -// #include -// #include +// #include +// #include +// #include // #include +// #include +// #include +// #include +// #include +// #include +// Utility function to save state -// #include -// Targeting ../CosineSimilarityImpl.java - - -// Targeting ../CosineSimilarity.java - - -// Targeting ../PairwiseDistanceImpl.java +// Utility function to load state -// Targeting ../PairwiseDistance.java +// Utility function to save param_groups - // namespace nn - // namespace torch +// Utility function to load param_groups +// We take as input vector of pair of string and unique_ptr to optimizer options +// so that we can retain the state for each param by using the old tensor impl +// keys (saved during serialization) and map the new tensor impl keys to the +// correct state for each param + // namespace detail -// Parsed from torch/nn/modules/embedding.h +// Note: These functions are all called `serialize()` so they can be called +// inside a template where the archive type is a template type and can thus be +// passed such that the appropriate overload is selected. -// #pragma once +/** Utility function to save a value of {@code int64_t} type. */ -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../EmbeddingImpl.java +/** Utility function to load a value of {@code int64_t} type. */ -// Targeting ../Embedding.java +/** Utility function to save a vector of step buffers. */ -// Targeting ../EmbeddingBagImpl.java +/** Utility function to load a vector of step buffers. */ -// Targeting ../EmbeddingBag.java +// Utility function to save state and param_groups - // namespace nn - // namespace torch +// Utility function to load state and param_groups and update state -// Parsed from torch/nn/modules/fold.h +/** Utility function to save a vector of buffers. */ -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// Targeting ../FoldImpl.java +/** Utility function to load a vector of buffers. 
 */

// #define _TORCH_OPTIM_SERIALIZE(name)
// torch::optim::serialize(archive, #name, self.name)

// #define _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(OptimizerName)
// torch::optim::serialize(
// archive, self)

// #define _TORCH_OPTIM_SERIALIZE_TORCH_ARG(name)
// {
// auto ivalue = torch::IValue(name());
// /* do not serialize if name is an undefined tensor*/
// if (!(ivalue.isTensor() &&
// ivalue.unsafeToTensorImpl() ==
// at::UndefinedTensorImpl::singleton())) {
// archive.write(#name, ivalue);
// }
// }

// #define _TORCH_OPTIM_SERIALIZE_TORCH_ARG_DEQUE(name)
// {
// c10::IValue ivalue = torch::IValue(deque_to_list(name()));
// archive.write(#name, ivalue);
// }

// #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(T, name)
// {
// c10::IValue ivalue;
// bool exists = archive.try_read(#name, ivalue);
// if (exists) {
// name(ivalue.to());
// } else {
// bool is_tensor_type = std::is_base_of::value;
// TORCH_INTERNAL_ASSERT(is_tensor_type);
// }
// }

// #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG_OPTIONAL(T, name)
// {
// c10::IValue ivalue;
// bool exists = archive.try_read(#name, ivalue);
// if (exists) {
// name(ivalue.toOptional());
// }
// }

// #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG_DEQUE(T, name)
// {
// c10::IValue ivalue;
// archive.read(#name, ivalue);
// auto list = ivalue.to>();
// name(list_to_deque(list));
// }

 // namespace optim
 // namespace torch


// Parsed from torch/optim/adagrad.h

// #pragma once

// #include
// #include
// #include
// #include

// #include
// #include

// #include
 // namespace serialize
// Targeting ../AdagradOptions.java


// Targeting ../AdagradParamState.java


// Targeting ../Adagrad.java


 // namespace optim
 // namespace torch


// Parsed from torch/optim/adam.h

// #pragma once

// #include
// #include
// #include

// #include
// #include
 // namespace serialize
// Targeting ../AdamOptions.java


// Targeting ../AdamParamState.java


// Targeting ../Adam.java


 // namespace optim
 // namespace torch


// Parsed from torch/optim/adamw.h

// #pragma once

// #include
// #include
// #include

// #include
// #include
 // namespace serialize
// Targeting ../AdamWOptions.java


// Targeting ../AdamWParamState.java


// Targeting ../AdamW.java


 // namespace optim
 // namespace torch


// Parsed from torch/optim/lbfgs.h

// #pragma once

// #include
// #include
// #include
// #include

// #include
// #include
// #include
// #include
// Targeting ../LBFGSOptions.java


// Targeting ../LBFGSParamState.java


// Targeting ../LBFGS.java
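+
+// Editor's sketch, not part of the upstream headers: constructing and stepping
+// one of the optimizers declared in this section from Java. It assumes a
+// TensorVector varargs constructor and an AdamOptions(double lr) constructor,
+// both believed to be generated by these presets:
+//
+//   Adam optimizer = new Adam(new TensorVector(w1, w2), new AdamOptions(1e-3));
+//   // ... forward pass, loss.backward() ...
+//   optimizer.step();       // apply the Adam update
+//   optimizer.zero_grad();  // reset gradients for the next iteration
+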
 // namespace optim
 // namespace torch


// Parsed from torch/optim/rmsprop.h

// #pragma once

// #include
// #include
// #include
// #include

// #include
// #include
// #include

// #include
// #include
 // namespace serialize

// Targeting ../RMSpropOptions.java


// Targeting ../RMSpropParamState.java


// Targeting ../RMSprop.java


 // namespace optim
 // namespace torch


// Parsed from torch/optim/sgd.h

// #pragma once

// #include
// #include
// #include
// #include

// #include
// #include
// #include
// #include
 // namespace serialize

// Targeting ../SGDOptions.java


// Targeting ../SGDParamState.java


// Targeting ../SGD.java


 // namespace optim
 // namespace torch


// Parsed from torch/optim/schedulers/lr_scheduler.h

// #pragma once

// #include
// #include
// Targeting ../LRScheduler.java


 // namespace optim
 // namespace torch


// Parsed from torch/optim/schedulers/step_lr.h

// #pragma once

// #include
// Targeting ../StepLR.java


 // namespace optim
 // namespace torch


// Parsed from torch/optim.h

// #pragma once

// #include
// #include
// #include
// #include
// #include
// #include
// #include

// #include
// #include


// Parsed from torch/sparse.h

// #pragma once

// #include
 // namespace torch


// Parsed from torch/special.h

// #pragma once

// #include
// #include

/** Computes the natural logarithm of the absolute value of the gamma function
 * See https://pytorch.org/docs/master/special.html#torch.special.gammaln.
 *
 * Example:
 * <pre>
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::gammaln(t);
+ *  }
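+ *
+ *  The same call through these Java bindings (editor's sketch; assumes a
+ *  varargs {@code randn} overload and a static import of
+ *  {@code org.bytedeco.pytorch.global.torch}):
+ *  <pre>{@code
+ *  Tensor t = randn(128);
+ *  Tensor out = gammaln(t);
+ *  }</pre>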
*/ +@Namespace("torch::special") public static native @ByVal Tensor gammaln(@Const @ByRef Tensor self); -// Targeting ../CrossEntropyLossImpl.java +/// +@Namespace("torch::special") public static native @ByRef Tensor gammaln_out(@ByRef Tensor result, @Const @ByRef Tensor self); +/** Computes the regularized lower incomplete gamma function + * See https://pytorch.org/docs/master/special.html#torch.special.gammainc. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  auto s = torch::randn(128, torch::kDouble);
+ *  torch::special::gammainc(s, t);
+ *  }
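+ *
+ *  In Java (editor's sketch, same assumptions as the {@code gammaln} example above):
+ *  <pre>{@code
+ *  Tensor t = randn(128);
+ *  Tensor s = randn(128);
+ *  Tensor out = gammainc(s, t);
+ *  }</pre>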
 */
+@Namespace("torch::special") public static native @ByVal Tensor gammainc(@Const @ByRef Tensor self, @Const @ByRef Tensor other);

///
+@Namespace("torch::special") public static native @ByRef Tensor gammainc_out(
+    @ByRef Tensor result,
+    @Const @ByRef Tensor self,
+    @Const @ByRef Tensor other);

+/** Computes the regularized upper incomplete gamma function
+ * See https://pytorch.org/docs/master/special.html#torch.special.gammaincc.
+ *
+ * Example:
+ * <pre>
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  auto s = torch::randn(128, torch::kDouble);
+ *  torch::special::gammaincc(s, t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor gammaincc(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Targeting ../BCEWithLogitsLoss.java +/// +@Namespace("torch::special") public static native @ByRef Tensor gammaincc_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @Const @ByRef Tensor other); +/** Computes the multivariate log-gamma function with dimension {@code p}, elementwise + * See https://pytorch.org/docs/master/special.html#torch.special.multigammaln. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::multigammaln(t, 1);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor multigammaln(@Const @ByRef Tensor self, @Cast("int64_t") long p); - // namespace nn - // namespace torch +/// +@Namespace("torch::special") public static native @ByRef Tensor multigammaln_out(@ByRef Tensor result, @Const @ByRef Tensor self, @Cast("int64_t") long p); +/** Computes the nth derivative of the digamma function on the input. + * See https:://pytorch.org/docs/master/special.html#torch.special.polygamma. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::polygamma(2, t);
+ *  }
*/ -// Parsed from torch/nn/modules/padding.h +/** Computes the logarithmic derivative of the gamma function on input + * See https://pytorch.org/docs/master/special.html#torch.special.psi + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::psi(t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor psi(@Const @ByRef Tensor self); -// #pragma once -// #include -// #include -// #include +/// +@Namespace("torch::special") public static native @ByRef Tensor psi_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// #include -// Targeting ../ReflectionPad1dImplBase.java +/** Computes the logarithmic derivative of the gamma function on input + * See https://pytorch.org/docs/master/special.html#torch.special.digamma + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::digamma(t);
+ *  }
*/ +/** Computes entropy of input, elementwise + * See https://pytorch.org/docs/master/special.html#torch.special.entr. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::entr(t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor entr(@Const @ByRef Tensor self); -// Targeting ../ReflectionPad2dImplBase.java +/// +@Namespace("torch::special") public static native @ByRef Tensor entr_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// Targeting ../ReflectionPad3dImplBase.java +/** Computes the error function + * See https://pytorch.org/docs/master/special.html#torch.special.erf. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::erf(t);
+ *  }
*/ +/** Computes the complementary error function + * See https://pytorch.org/docs/master/special.html#torch.special.erfc. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::erfc(t);
+ *  }
*/ -// Targeting ../ReflectionPad1dImpl.java +/** Computes the scaled complementary error function + * See https://pytorch.org/docs/master/special.html#torch.special.erfcx. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::erfcx(t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor erfcx(@Const @ByRef Tensor self); -// Targeting ../ReflectionPad1d.java +/// +@Namespace("torch::special") public static native @ByRef Tensor erfcx_out(@ByRef Tensor result, @Const @ByRef Tensor self); +/** Computes the inverse error function + * See https://pytorch.org/docs/master/special.html#torch.special.erfinv. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::erfinv(t);
+ *  }
*/ -// Targeting ../ReflectionPad2dImpl.java +/** Computes the log of summed exponentials of each row of input in the given + * dimension dim See + * https://pytorch.org/docs/master/special.html#torch.special.logsumexp. + * + * Example: + *
{@code
+ *  auto t = torch::randn({3, 3});
+ *  torch::special::logsumexp(t, 1);
+ *  }
*/ +/** Computes the argument, x, for which the area under the Gaussian probability + * density function (integrated from minus infinity to x) is equal to input, + * elementwise. See + * https://pytorch.org/docs/master/special.html#torch.special.ndtri + * + * Example: + *
{@code
+ *  auto t = torch::rand(128, torch::kDouble);
+ *  torch::special::ndtri(t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor ndtri(@Const @ByRef Tensor self); -// Targeting ../ReflectionPad2d.java +/// +@Namespace("torch::special") public static native @ByRef Tensor ndtri_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// Targeting ../ReflectionPad3dImpl.java +/** Computes the log of area under the standard Gaussian probability density + * function, integrated from minus infinity to :attr:{@code input}, elementwise See + * https://pytorch.org/docs/master/special.html#torch.special.log_ndtr + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::log_ndtr(t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor log_ndtr(@Const @ByRef Tensor self); -// Targeting ../ReflectionPad3d.java +/// +@Namespace("torch::special") public static native @ByRef Tensor log_ndtr_out(@ByRef Tensor result, @Const @ByRef Tensor self); +/** Computes the logit of input, elementwise. + * See https://pytorch.org/docs/master/special.html#torch.special.logit. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::logit(t);
+ *  }
*/ -// Targeting ../ReplicationPad1dImplBase.java +/** Computes the expit (also known as the logistic sigmoid function) of input, + * elementwise See + * https://pytorch.org/docs/master/special.html#torch.special.expit. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::expit(t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor expit(@Const @ByRef Tensor self); -// Targeting ../ReplicationPad2dImplBase.java +/// +@Namespace("torch::special") public static native @ByRef Tensor expit_out(@ByRef Tensor result, @Const @ByRef Tensor self); +/** Computes the base two exponential function of :attr:{@code input}, elementwise + * See https://pytorch.org/docs/master/special.html#torch.special.exp2. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::exp2(t);
+ *  }
*/ -// Targeting ../ReplicationPad3dImplBase.java +/** Computes the exponential of the elements minus 1, elementwise + * See https://pytorch.org/docs/master/special.html#torch.special.expm1. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::expm1(t);
+ *  }
*/ +/** Computes x * log(y) for inputs, elementwise + * See https://pytorch.org/docs/master/special.html#torch.special.xlogy. + * + * Example: + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto y = torch::randn(128, torch::kDouble);
+ *  torch::special::xlogy(x, y);
+ *  }
*/ -// Targeting ../ReplicationPad1dImpl.java +/** Computes x * log1p(y) for inputs, elementwise + * See https://pytorch.org/docs/master/special.html#torch.special.xlog1py. + * + * Example: + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto y = torch::randn(128, torch::kDouble);
+ *  torch::special::xlog1py(x, y);
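+ *  // The Scalar overloads declared below accept a plain number for either
+ *  // argument (illustrative sketch):
+ *  torch::special::xlog1py(2.0, y);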
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor xlog1py(@Const @ByRef Tensor self, @Const @ByRef Tensor other); +@Namespace("torch::special") public static native @ByVal Tensor xlog1py(@Const @ByRef Scalar self, @Const @ByRef Tensor other); -// Targeting ../ReplicationPad1d.java +@Namespace("torch::special") public static native @ByVal Tensor xlog1py(@Const @ByRef Tensor self, @Const @ByRef Scalar other); +@Namespace("torch::special") public static native @ByRef Tensor xlog1py_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @Const @ByRef Tensor other); -// Targeting ../ReplicationPad2dImpl.java +@Namespace("torch::special") public static native @ByRef Tensor xlog1py_out( + @ByRef Tensor result, + @Const @ByRef Scalar self, + @Const @ByRef Tensor other); -// Targeting ../ReplicationPad2d.java +/// +@Namespace("torch::special") public static native @ByRef Tensor xlog1py_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @Const @ByRef Scalar other); +/** Computes Hurwitz Zeta function for inputs, elementwise + * See https://pytorch.org/docs/master/special.html#torch.special.zeta. + * + * Example: + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto y = torch::randn(128, torch::kDouble);
+ *  torch::special::zeta(x, y);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor zeta(@Const @ByRef Tensor self, @Const @ByRef Tensor other); -// Targeting ../ReplicationPad3dImpl.java +@Namespace("torch::special") public static native @ByVal Tensor zeta(@Const @ByRef Scalar self, @Const @ByRef Tensor other); +@Namespace("torch::special") public static native @ByVal Tensor zeta(@Const @ByRef Tensor self, @Const @ByRef Scalar other); -// Targeting ../ReplicationPad3d.java +@Namespace("torch::special") public static native @ByRef Tensor zeta_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @Const @ByRef Tensor other); +@Namespace("torch::special") public static native @ByRef Tensor zeta_out( + @ByRef Tensor result, + @Const @ByRef Scalar self, + @Const @ByRef Tensor other); -// Targeting ../ZeroPad2dImpl.java +/// +@Namespace("torch::special") public static native @ByRef Tensor zeta_out( + @ByRef Tensor result, + @Const @ByRef Tensor self, + @Const @ByRef Scalar other); -// Targeting ../ZeroPad2d.java +/** Computes the zeroth order modified Bessel function of the first kind of + * input, elementwise See + * https://pytorch.org/docs/master/special.html#torch.special.i0 + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::i0(t);
+ *  }
*/ +/** Computes the area under the standard Gaussian probability density function, + * integrated from minus infinity to :attr:{@code input}, elementwise + * See https://pytorch.org/docs/master/special.html#torch.special.ndtr + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::ndtr(t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor ndtr(@Const @ByRef Tensor self); -// Targeting ../ConstantPad1dImplBase.java +/// +@Namespace("torch::special") public static native @ByRef Tensor ndtr_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// Targeting ../ConstantPad2dImplBase.java +/** Computes the exponentially scaled zeroth order modified Bessel function of + * the first kind See + * https://pytorch.org/docs/master/special.html#torch.special.i0e. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::i0e(t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor i0e(@Const @ByRef Tensor self); -// Targeting ../ConstantPad3dImplBase.java +/// +@Namespace("torch::special") public static native @ByRef Tensor i0e_out(@ByRef Tensor result, @Const @ByRef Tensor self); +/** Computes the first order modified Bessel function of the first kind + * See https://pytorch.org/docs/master/special.html#torch.special.i1. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::i1(t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor i1(@Const @ByRef Tensor self); -// Targeting ../ConstantPad1dImpl.java +/// +@Namespace("torch::special") public static native @ByRef Tensor i1_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// Targeting ../ConstantPad1d.java +/** Computes the exponentially scaled first order modified Bessel function of + * the first kind See + * https://pytorch.org/docs/master/special.html#torch.special.i1e. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::i1e(t);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor i1e(@Const @ByRef Tensor self); -// Targeting ../ConstantPad2dImpl.java +/// +@Namespace("torch::special") public static native @ByRef Tensor i1e_out(@ByRef Tensor result, @Const @ByRef Tensor self); +/** Computes the sinc of input, elementwise + * See https://pytorch.org/docs/master/special.html#torch.special.sinc. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::sinc(t);
+ *  }
*/ -// Targeting ../ConstantPad2d.java +/** Rounds the elements of the input + * See https://pytorch.org/docs/master/special.html#torch.special.round. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::round(t);
+ *  }
*/ +/** Computes log(1 + x) of the input, elementwise + * See https://pytorch.org/docs/master/special.html#torch.special.log1p. + * + * Example: + *
{@code
+ *  auto t = torch::randn(128, torch::kDouble);
+ *  torch::special::log1p(t);
+ *  }
*/ -// Targeting ../ConstantPad3dImpl.java +/** Computes log followed by softmax(x) of the input + * See https://pytorch.org/docs/master/special.html#torch.special.log_softmax. + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 128}, torch::kDouble);
+ *  torch::special::log_softmax(t, 0);
+ *  }
*/ -// Targeting ../ConstantPad3d.java +/** Computes softmax of the input along a given dimension + * See https://pytorch.org/docs/master/special.html#torch.special.softmax. + * + * Example: + *
{@code
+ *  auto t = torch::randn({128, 128}, torch::kDouble);
+ *  torch::special::softmax(t, 0);
+ *  }
*/ +/** Airy function Ai. + * + * See https://pytorch.org/docs/master/special.html#torch.special.airy_ai. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::airy_ai(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor airy_ai(@Const @ByRef Tensor x); - // namespace nn - // namespace torch +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor airy_ai_out(@ByRef Tensor y, @Const @ByRef Tensor x); +/** Bessel function of the first kind of order 0. + * + * See https://pytorch.org/docs/master/special.html#torch.special.bessel_j0. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::bessel_j0(x);
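+ *  // Sketch of the bessel_j0_out overload declared just below, writing into
+ *  // a preallocated tensor:
+ *  auto out = torch::empty_like(x);
+ *  torch::special::bessel_j0_out(out, x);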
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor bessel_j0(@Const @ByRef Tensor self); -// Parsed from torch/nn/modules/pooling.h -// #pragma once +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor bessel_j0_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// #include -// #include -// #include -// #include -// #include +/** Bessel function of the first kind of order 1. + * + * See https://pytorch.org/docs/master/special.html#torch.special.bessel_j1. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::bessel_j1(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor bessel_j1(@Const @ByRef Tensor self); -// #include -// Targeting ../AvgPool1dImplBase.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor bessel_j1_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// Targeting ../AvgPool2dImplBase.java +/** Bessel function of the second kind of order 0. + * + * See https://pytorch.org/docs/master/special.html#torch.special.bessel_y0. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::bessel_y0(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor bessel_y0(@Const @ByRef Tensor self); -// Targeting ../AvgPool3dImplBase.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor bessel_y0_out(@ByRef Tensor result, @Const @ByRef Tensor self); +/** Bessel function of the second kind of order 1. + * + * See https://pytorch.org/docs/master/special.html#torch.special.bessel_y1. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::bessel_y1(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor bessel_y1(@Const @ByRef Tensor self); -// Targeting ../AvgPool1dImpl.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor bessel_y1_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// Targeting ../AvgPool1d.java +/** Chebyshev polynomial of the first kind. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_t. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::chebyshev_polynomial_t(x, n);
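+ *  // Sketch of the _out overload declared just below, writing into a
+ *  // preallocated tensor:
+ *  auto out = torch::empty_like(x);
+ *  torch::special::chebyshev_polynomial_t_out(out, x, n);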
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_t(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// Targeting ../AvgPool2dImpl.java +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_t_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); -// Targeting ../AvgPool2d.java +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_t_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../AvgPool3dImpl.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_t_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); +/** Chebyshev polynomial of the second kind. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_u. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::chebyshev_polynomial_u(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// Targeting ../AvgPool3d.java +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_u(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Scalar n); -// Targeting ../MaxPool1dImplBase.java +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_u_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_u_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../MaxPool2dImplBase.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_u_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); -// Targeting ../MaxPool3dImplBase.java +/** Chebyshev polynomial of the third kind. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_v. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::chebyshev_polynomial_v(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_v(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// Targeting ../MaxPool1dImpl.java +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_v_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); -// Targeting ../MaxPool1d.java +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_v_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../MaxPool2dImpl.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_v_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); +/** Chebyshev polynomial of the fourth kind. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.chebyshev_polynomial_w. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::chebyshev_polynomial_w(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// Targeting ../MaxPool2d.java +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_w(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Scalar n); -// Targeting ../MaxPool3dImpl.java +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_w_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_w_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../MaxPool3d.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor chebyshev_polynomial_w_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); -// Targeting ../AdaptiveMaxPool1dImplBase.java +/** Physicist’s Hermite polynomial. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.hermite_polynomial_h. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::hermite_polynomial_h(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor hermite_polynomial_h(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor hermite_polynomial_h(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// Targeting ../AdaptiveMaxPool2dImplBase.java +@Namespace("torch::special") public static native @ByVal Tensor hermite_polynomial_h(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +@Namespace("torch::special") public static native @ByRef Tensor hermite_polynomial_h_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); -// Targeting ../AdaptiveMaxPool3dImplBase.java +@Namespace("torch::special") public static native @ByRef Tensor hermite_polynomial_h_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../AdaptiveMaxPool1dImpl.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor hermite_polynomial_h_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); +/** Probabilist’s Hermite polynomial. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.hermite_polynomial_he. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::hermite_polynomial_he(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor hermite_polynomial_he(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// Targeting ../AdaptiveMaxPool1d.java +@Namespace("torch::special") public static native @ByVal Tensor hermite_polynomial_he(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor hermite_polynomial_he(@Const @ByRef Tensor x, @Const @ByRef Scalar n); -// Targeting ../AdaptiveMaxPool2dImpl.java +@Namespace("torch::special") public static native @ByRef Tensor hermite_polynomial_he_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByRef Tensor hermite_polynomial_he_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../AdaptiveMaxPool2d.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor hermite_polynomial_he_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); -// Targeting ../AdaptiveMaxPool3dImpl.java +/** Laguerre polynomial. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.laguerre_polynomial_l. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::laguerre_polynomial_l(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor laguerre_polynomial_l(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor laguerre_polynomial_l(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// Targeting ../AdaptiveMaxPool3d.java +@Namespace("torch::special") public static native @ByVal Tensor laguerre_polynomial_l(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +@Namespace("torch::special") public static native @ByRef Tensor laguerre_polynomial_l_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); -// Targeting ../AdaptiveAvgPool1dImplBase.java +@Namespace("torch::special") public static native @ByRef Tensor laguerre_polynomial_l_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../AdaptiveAvgPool2dImplBase.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor laguerre_polynomial_l_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); +/** Legendre polynomial. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.legendre_polynomial_p. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::legendre_polynomial_p(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor legendre_polynomial_p(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// Targeting ../AdaptiveAvgPool3dImplBase.java +@Namespace("torch::special") public static native @ByVal Tensor legendre_polynomial_p(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor legendre_polynomial_p(@Const @ByRef Tensor x, @Const @ByRef Scalar n); -// Targeting ../AdaptiveAvgPool1dImpl.java +@Namespace("torch::special") public static native @ByRef Tensor legendre_polynomial_p_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByRef Tensor legendre_polynomial_p_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../AdaptiveAvgPool1d.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor legendre_polynomial_p_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); -// Targeting ../AdaptiveAvgPool2dImpl.java +/** Modified Bessel function of the first kind of order 0. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_i0. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::modified_bessel_i0(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor modified_bessel_i0(@Const @ByRef Tensor self); -// Targeting ../AdaptiveAvgPool2d.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor modified_bessel_i0_out(@ByRef Tensor result, @Const @ByRef Tensor self); +/** Modified Bessel function of the first kind of order 1. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_i1. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::modified_bessel_i1(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor modified_bessel_i1(@Const @ByRef Tensor self); -// Targeting ../AdaptiveAvgPool3dImpl.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor modified_bessel_i1_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// Targeting ../AdaptiveAvgPool3d.java +/** Modified Bessel function of the second kind of order 0. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_k0. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::modified_bessel_k0(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor modified_bessel_k0(@Const @ByRef Tensor self); -// Targeting ../MaxUnpool1dImplBase.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor modified_bessel_k0_out(@ByRef Tensor result, @Const @ByRef Tensor self); +/** Modified Bessel function of the second kind of order 1. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.modified_bessel_k1. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::modified_bessel_k1(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor modified_bessel_k1(@Const @ByRef Tensor self); -// Targeting ../MaxUnpool2dImplBase.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor modified_bessel_k1_out(@ByRef Tensor result, @Const @ByRef Tensor self); -// Targeting ../MaxUnpool3dImplBase.java +/** Scaled modified Bessel function of the second kind of order 0. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.scaled_modified_bessel_k0. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::scaled_modified_bessel_k0(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor scaled_modified_bessel_k0(@Const @ByRef Tensor x); -// Targeting ../MaxUnpool1dImpl.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor scaled_modified_bessel_k0_out(@ByRef Tensor y, @Const @ByRef Tensor x); +/** Scaled modified Bessel function of the second kind of order 1. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.scaled_modified_bessel_k1. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::scaled_modified_bessel_k1(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor scaled_modified_bessel_k1(@Const @ByRef Tensor x); -// Targeting ../MaxUnpool1d.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor scaled_modified_bessel_k1_out(@ByRef Tensor y, @Const @ByRef Tensor x); -// Targeting ../MaxUnpool2dImpl.java +/** Shifted Chebyshev polynomial of the first kind. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_t. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::shifted_chebyshev_polynomial_t(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_t(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// Targeting ../MaxUnpool2d.java +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_t(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_t_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); -// Targeting ../MaxUnpool3dImpl.java +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_t_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../MaxUnpool3d.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_t_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); +/** Shifted Chebyshev polynomial of the second kind. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_u. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::shifted_chebyshev_polynomial_u(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// Targeting ../FractionalMaxPool2dImpl.java +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_u(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_u(@Const @ByRef Tensor x, @Const @ByRef Scalar n); -// Targeting ../FractionalMaxPool2d.java +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_u_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_u_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../FractionalMaxPool3dImpl.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_u_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); -// Targeting ../FractionalMaxPool3d.java +/** Shifted Chebyshev polynomial of the third kind. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_v. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::shifted_chebyshev_polynomial_v(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_v(@Const @ByRef Scalar x, @Const @ByRef Tensor n); -// Targeting ../LPPool1dImplBase.java +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_v(@Const @ByRef Tensor x, @Const @ByRef Scalar n); +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_v_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); -// Targeting ../LPPool2dImplBase.java +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_v_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../LPPool1dImpl.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_v_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); +/** Shifted Chebyshev polynomial of the fourth kind. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.shifted_chebyshev_polynomial_w. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ *  auto n = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::shifted_chebyshev_polynomial_w(x, n);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Tensor n); -// Targeting ../LPPool1d.java +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_w(@Const @ByRef Scalar x, @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByVal Tensor shifted_chebyshev_polynomial_w(@Const @ByRef Tensor x, @Const @ByRef Scalar n); -// Targeting ../LPPool2dImpl.java +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_w_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Tensor n); +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_w_out( + @ByRef Tensor output, + @Const @ByRef Scalar x, + @Const @ByRef Tensor n); -// Targeting ../LPPool2d.java +/// +/// +/// +/// +@Namespace("torch::special") public static native @ByRef Tensor shifted_chebyshev_polynomial_w_out( + @ByRef Tensor output, + @Const @ByRef Tensor x, + @Const @ByRef Scalar n); +/** Spherical Bessel function of the first kind of order 0. + * + * See + * https://pytorch.org/docs/master/special.html#torch.special.spherical_bessel_j0. + * + * Example: + * + *
{@code
+ *  auto x = torch::randn(128, torch::kDouble);
+ * 
+ *  torch::special::spherical_bessel_j0(x);
+ *  }
*/ +@Namespace("torch::special") public static native @ByVal Tensor spherical_bessel_j0(@Const @ByRef Tensor x); - // namespace nn +@Namespace("torch::special") public static native @ByRef Tensor spherical_bessel_j0_out(@ByRef Tensor y, @Const @ByRef Tensor x); + // namespace special // namespace torch -// Parsed from torch/nn/modules/rnn.h +// Parsed from torch/version.h // #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include -// #include - -// #include -// #include -// #include -// #include -// Targeting ../RNNImplBase.java - - -// Targeting ../LSTMImplBase.java - +/** Indicates the major version of LibTorch. */ +public static final int TORCH_VERSION_MAJOR = 2; -// Targeting ../GRUImplBase.java +/** Indicates the minor version of LibTorch. */ +public static final int TORCH_VERSION_MINOR = 0; +/** Indicates the patch version of LibTorch. */ +public static final int TORCH_VERSION_PATCH = 0; +/** Indicates the version of LibTorch. */ +public static final String TORCH_VERSION = + "2.0.0"; -// Targeting ../RNNImpl.java +// Parsed from torch/csrc/api/include/torch/all.h -// Targeting ../RNN.java +// #pragma once +// #if !defined(_MSC_VER) && __cplusplus < 201402L +// #error C++14 or later compatible compiler is required to use PyTorch. +// #endif -// Targeting ../LSTMImpl.java +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Targeting ../LSTM.java +// Parsed from caffe2/serialize/inline_container.h +// #pragma once -// Targeting ../GRUImpl.java +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include -// Targeting ../GRU.java +// #include "caffe2/serialize/istream_adapter.h" +// #include "caffe2/serialize/read_adapter_interface.h" +// #include "caffe2/serialize/versions.h" +// Targeting ../mz_zip_archive.java +// Targeting ../PyTorchStreamReader.java -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RNNCellImplBase -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -// Targeting ../RNNCellImplBase.java +// Writer-specific constants +@Namespace("caffe2::serialize::detail") @MemberGetter public static native @Cast("const uint64_t") long kFieldAlignment(); -// Targeting ../LSTMCellImplBase.java +// Returns a record to be appended to the local user extra data entry in order +// to make data beginning aligned at kFieldAlignment bytes boundary. 
+ // namespace detail -// Targeting ../GRUCellImplBase.java + // namespace serialize + // namespace caffe2 +// Parsed from caffe2/serialize/istream_adapter.h -// Targeting ../RNNCellImpl.java +// #pragma once +// #include -// Targeting ../RNNCell.java +// #include "c10/macros/Macros.h" +// #include "caffe2/serialize/read_adapter_interface.h" +// Targeting ../IStreamAdapter.java -// Targeting ../LSTMCellImpl.java + // namespace serialize + // namespace caffe2 -// Targeting ../LSTMCell.java +// Parsed from caffe2/serialize/read_adapter_interface.h -// Targeting ../GRUCellImpl.java +// #pragma once +// #include +// #include -// Targeting ../GRUCell.java +// #include "c10/macros/Macros.h" +// Targeting ../ReadAdapterInterface.java - // namespace nn - // namespace torch + // namespace serialize + // namespace caffe2 -// Parsed from torch/nn/modules/pixelshuffle.h +// Parsed from caffe2/serialize/versions.h // #pragma once +// #include -// #include -// #include -// #include - -// #include -// Targeting ../PixelShuffleImpl.java - - -// Targeting ../PixelShuffle.java - - -// Targeting ../PixelUnshuffleImpl.java - +@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kMinSupportedFileFormatVersion(); -// Targeting ../PixelUnshuffle.java +@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kMaxSupportedFileFormatVersion(); +// Versions (i.e. why was the version number bumped?) +// Note [Dynamic Versions and torch.jit.save vs. torch.save] +// +// Our versioning scheme has a "produced file format version" which +// describes how an archive is to be read. The version written in an archive +// is at least this current produced file format version, but may be greater +// if it includes certain symbols. We refer to these conditional versions +// as "dynamic," since they are identified at runtime. +// +// Dynamic versioning is useful when an operator's semantics are updated. +// When using torch.jit.save we want those semantics to be preserved. If +// we bumped the produced file format version on every change, however, +// then older versions of PyTorch couldn't read even simple archives, like +// a single tensor, from newer versions of PyTorch. Instead, we +// assign dynamic versions to these changes that override the +// produced file format version as needed. That is, when the semantics +// of torch.div changed it was assigned dynamic version 4, and when +// torch.jit.saving modules that use torch.div those archives also have +// (at least) version 4. This prevents earlier versions of PyTorch +// from accidentally performing the wrong kind of division. Modules +// that don't use torch.div or other operators with dynamic versions +// can write the produced file format version, and these programs will +// run as expected on earlier versions of PyTorch. +// +// While torch.jit.save attempts to preserve operator semantics, +// torch.save does not. torch.save is analogous to pickling Python, so +// a function that uses torch.div will have different behavior if torch.saved +// and torch.loaded across PyTorch versions. From a technical perspective, +// torch.save ignores dynamic versioning. + +// 1. Initial version +// 2. Removed op_version_set version numbers +// 3. Added type tags to pickle serialization of container types +// 4. (Dynamic) Stopped integer division using torch.div +// (a versioned symbol preserves the historic behavior of versions 1--3) +// 5. 
(Dynamic) Stops torch.full inferring a floating point dtype +// when given bool or integer fill values. +// 6. Write version string to `./data/version` instead of `version`. + +// [12/15/2021] +// kProducedFileFormatVersion is set to 7 from 3 due to a different +// interpretation of what the file format version is. +// Whenever a new upgrader is introduced, +// this number should be bumped. +// The reasons the version was bumped in the past: +// 1. aten::div is changed at version 4 +// 2. aten::full is changed at version 5 +// 3. torch.package uses version 6 +// 4. Introduce new upgrader design and set the version number to 7 +// to mark this change +// -------------------------------------------------- +// We describe new operator version bump reasons here: +// 1) [01/24/2022] +// We bump the version number to 8 to update aten::linspace +// and aten::linspace.out to error out when steps is not +// provided. (see: https://github.com/pytorch/pytorch/issues/55951) +// 2) [01/30/2022] +// Bump the version number to 9 to update aten::logspace +// and aten::logspace.out to error out when steps is not +// provided. (see: https://github.com/pytorch/pytorch/issues/55951) +// 3) [02/11/2022] +// Bump the version number to 10 to update aten::gelu +// and aten::gelu.out to support the new approximate kwarg. +// (see: https://github.com/pytorch/pytorch/pull/61439) +@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kProducedFileFormatVersion(); + +// Absolute minimum version with which we will write packages. This +// means that every package from now on will always be +// greater than this number. +@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kMinProducedFileFormatVersion(); + +// The version we write when the archive contains bytecode. +// It must be higher than or equal to kProducedFileFormatVersion, +// because TorchScript changes are likely to introduce bytecode changes. +// If kProducedFileFormatVersion is increased, kProducedBytecodeVersion +// should be increased too. The relationship is: +// kMaxSupportedFileFormatVersion >= (most likely ==) kProducedBytecodeVersion +// >= kProducedFileFormatVersion +// If a format change is forward compatible (still readable by older +// executables), we will not increment the version number, to minimize the +// risk of breaking existing clients. TODO: A better way would be to allow +// the caller that creates a model to specify a maximum version that its +// clients can accept. +// Versions: +// 0x1L: Initial version +// 0x2L: (Comment missing) +// 0x3L: (Comment missing) +// 0x4L: (update) Added schema to function tuple. Forward-compatible change. +// 0x5L: (update) Bytecode now shares constant tensor files with +// torchscript, and only serializes extra tensors that are not in the +// torchscript constant table. Also updates the tensor storage schema to the +// unified format; the root key of tensor storage is updated from {index} to +// {the_pointer_value_the_tensor.storage}, for example: +// `140245072983168.storage`. Forward-compatibility change. +// 0x6L: Implicit operator versioning using the number of specified arguments. +// Refer to the summary of https://github.com/pytorch/pytorch/pull/56845 for +// details. +// 0x7L: Enable support for operators with default arguments plus out +// arguments. See https://github.com/pytorch/pytorch/pull/63651 for +// details. +// 0x8L: Emit promoted operators as instructions. 
See +// https://github.com/pytorch/pytorch/pull/71662 for details. +// 0x9L: Change serialization format from pickle to flatbuffer. This version is to +// serve migration. v8 pickle and v9 flatbuffer are the same. Refer to the +// summary of https://github.com/pytorch/pytorch/pull/75201 for more details. +@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kProducedBytecodeVersion(); + +// static_assert( +// kProducedBytecodeVersion >= kProducedFileFormatVersion, +// "kProducedBytecodeVersion must be higher or equal to +// kProducedFileFormatVersion."); + +// Introduce kMinSupportedBytecodeVersion and kMaxSupportedBytecodeVersion +// for limited backward/forward compatibility support of bytecode. If +// kMinSupportedBytecodeVersion <= model_version <= kMaxSupportedBytecodeVersion +// (in loader), we should support this model_version. For example, we provide a +// wrapper to handle an updated operator. +@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kMinSupportedBytecodeVersion(); +@Namespace("caffe2::serialize") @MemberGetter public static native @Cast("const uint64_t") long kMaxSupportedBytecodeVersion(); - // namespace nn - // namespace torch + // namespace serialize + // namespace caffe2 -// Parsed from torch/nn/modules/upsampling.h +// Parsed from torch/csrc/jit/serialization/unpickler.h // #pragma once -// #include -// #include -// #include -// #include -// #include - +// #include +// #include +// #include // #include - -// #include -// #include -// Targeting ../UpsampleImpl.java +// #include +// #include +// Targeting ../Unpickler.java -// Targeting ../Upsample.java - // namespace nn + // namespace jit // namespace torch -// Parsed from torch/nn/modules/activation.h +// Parsed from torch/csrc/jit/frontend/script_type_parser.h // #pragma once - -// #include -// #include -// #include -// #include -// #include - +// #include // #include -// Targeting ../ELUImpl.java - +// #include +// #include +// Targeting ../ScriptTypeParser.java - -// Targeting ../ELU.java - -// Targeting ../SELUImpl.java + // namespace jit + // namespace torch -// Targeting ../SELU.java +// Parsed from torch/csrc/jit/frontend/resolver.h +// #pragma once -// Targeting ../HardshrinkImpl.java +// #include +// #include +// #include +// Targeting ../Resolver.java -// Targeting ../Hardshrink.java +// Targeting ../NativeResolver.java -// Targeting ../HardtanhImpl.java +@Namespace("torch::jit") public static native @SharedPtr NativeResolver nativeResolver(); + // namespace jit + // namespace torch -// Targeting ../Hardtanh.java +// Parsed from torch/csrc/jit/frontend/sugared_value.h -// Targeting ../LeakyReLUImpl.java +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// Targeting ../SugaredValue.java -// Targeting ../LeakyReLU.java +// Targeting ../SimpleValue.java -// Targeting ../LogSigmoidImpl.java +// Targeting ../BuiltinFunction.java -// Targeting ../LogSigmoid.java +// Targeting ../SugaredTupleValue.java -// Targeting ../SoftmaxImpl.java +// Targeting ../BuiltinModule.java -// Targeting ../Softmax.java +// Targeting ../ClassValue.java -// Targeting ../SoftminImpl.java +// Targeting ../NamedTupleConstructor.java -// Targeting ../Softmin.java +// Targeting ../FunctionValue.java -// Targeting ../LogSoftmaxImpl.java +// Targeting ../ClosureValue.java -// Targeting ../LogSoftmax.java +// Targeting ../MethodValue.java 
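Side note on the caffe2::serialize version getters parsed earlier in this hunk: the commented-out static_assert documents an ordering invariant that a binding user can sanity-check at runtime. A hedged sketch, again assuming the getters are exposed on the generated org.bytedeco.pytorch.global.torch class:

import static org.bytedeco.pytorch.global.torch.*;

public class FormatVersionCheck {
    public static void main(String[] args) {
        // Mirrors the relationship documented above:
        // kMaxSupportedFileFormatVersion >= kProducedBytecodeVersion
        //                                >= kProducedFileFormatVersion
        long produced = kProducedFileFormatVersion();
        long bytecode = kProducedBytecodeVersion();
        long maxSupported = kMaxSupportedFileFormatVersion();
        if (maxSupported < bytecode || bytecode < produced) {
            throw new AssertionError("inconsistent serialization format versions");
        }
    }
}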
-// Targeting ../Softmax2dImpl.java +// Targeting ../PrintValue.java -// Targeting ../Softmax2d.java +// Targeting ../CastValue.java -// Targeting ../PReLUImpl.java +// Targeting ../TensorCastValue.java -// Targeting ../PReLU.java +// Targeting ../MagicMethod.java -// Targeting ../ReLUImpl.java +// Targeting ../SpecialFormValue.java -// Targeting ../ReLU.java +// Targeting ../LegacyTensorConstructor.java -// Targeting ../ReLU6Impl.java +// Targeting ../RangeValue.java -// Targeting ../ReLU6.java -// Targeting ../RReLUImpl.java +// Specialized Tree structure to be matched against for special handling +// of builtin function iterable expressions like zip(), enumerate(), etc. +// zip and enumerate can be modeled as a tree of SimpleValue/RangeValue: +// zip(x, y) -> (x, y) with tuple assignment to each loop target +// enumerate(x) -> (range(0, math.inf, 1), x) +// So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be: +// (a, (range(0, math.inf, 1), b), range(0, 100)) +// We use those base iterables to fill in the loop information like +// max_trip_count and set the value table for loop targets. +// Iterables can contain lists of SugaredValues like ModuleLists. If it +// does, then we emit it unrolled and require that all values it contains +// have a statically-determinable length. +@Namespace("torch::jit") public static native @ByVal ValueVector toValues( + @ByRef Graph g, + @ByVal NamedValueArrayRef nvs); -// Targeting ../RReLU.java +// Targeting ../SimpleSelf.java -// Targeting ../CELUImpl.java +// Targeting ../ExceptionMessageValue.java -// Targeting ../CELU.java +// Targeting ../ExceptionValue.java -// Targeting ../GLUImpl.java +// Targeting ../SugaredEnumClass.java -// Targeting ../GLU.java +// Targeting ../SliceValue.java -// Targeting ../GELUImpl.java + // namespace jit + // namespace torch -// Targeting ../GELU.java +// Parsed from torch/csrc/jit/frontend/error_report.h +// #pragma once -// Targeting ../SiLUImpl.java +// #include +// #include +// Targeting ../Call.java -// Targeting ../SiLU.java +// Targeting ../ErrorReport.java -// Targeting ../MishImpl.java + // namespace jit + // namespace torch -// Targeting ../Mish.java +// Parsed from torch/csrc/jit/frontend/tree.h -// Targeting ../SigmoidImpl.java +// #pragma once +// #include +// #include +// #include +// #include -// Targeting ../Sigmoid.java +// #include +// #include +// #include +// Trees are used to represent all forms of TC IR, pre- and post-typechecking. +// Rather than have a full class hierarchy for all TC statements, trees are a +// slight variation of Lisp s-expressions. For instance, the expression a*b+1 +// is represented as: +// (+ (* (ident a) (ident b)) (const 1)) +// Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which +// define stringValue(). Everything else is a Compound object, which has a +// 'kind' that is a token from lexer.h's TokenKind enum. Single-character +// operators like '+' are represented using the character itself (so, add.kind() +// would be '+'). Each Compound object also contains a list of subtrees and is +// associated with a SourceRange for error reporting. +// Memory management of trees is done using intrusive_ptr. 
+// Targeting ../Tree.java -// Targeting ../SoftplusImpl.java +// Targeting ../JitString.java -// Targeting ../Softplus.java -// Targeting ../SoftshrinkImpl.java +@Namespace("torch::jit") public static native @ByVal SourceRange mergeRanges(@ByVal SourceRange c, @Cast("const torch::jit::TreeList*") @ByRef SymDimVector others); +// Targeting ../Compound.java -// Targeting ../Softshrink.java +// Targeting ../pretty_tree.java -// Targeting ../SoftsignImpl.java +@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @ByVal pretty_tree t_); -// Targeting ../Softsign.java +@Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef TreeRef t); + // namespace jit + // namespace torch -// Targeting ../TanhImpl.java +// Parsed from torch/csrc/jit/frontend/lexer.h -// Targeting ../Tanh.java +// #pragma once +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +// #endif -// Targeting ../TanhshrinkImpl.java +// single character tokens are just the character itself '+' +// multi-character tokens need an entry here +// if the third entry is not the empty string, it is used +// in the lexer to match this token. +// These kinds are also used in Tree.h as the kind of the AST node. +// Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the +// lexer. -// Targeting ../Tanhshrink.java +// #define TC_FORALL_TOKEN_KINDS(_) +// _(TK_EOF, "eof", "") +// _(TK_WHITESPACE, "whitespace", "") +// _(TK_WHITESPACE_EOF, "whitespace_eof", "") +// _(TK_NUMBER, "number", "") +// _(TK_NEWLINE, "newline", "") +// _(TK_INDENT, "indent", "") +// _(TK_DEDENT, "dedent", "") +// _(TK_DEF, "def", "def") +// _(TK_EQUIVALENT, "equivalent", "<=>") +// _(TK_IDENT, "ident", "") +// _(TK_STRING, "string", "") +// _(TK_STRINGLITERAL, "string_literal", "") +// _(TK_CONST, "const", "") +// _(TK_LIST, "list", "") +// _(TK_DICT, "dict", "") +// _(TK_OPTION, "option", "") +// _(TK_APPLY, "apply", "") +// _(TK_COMPREHENSION, "comprehension", "") +// _(TK_RANGE_CONSTRAINT, "range_constraint", "") +// _(TK_PARAM, "param", "") +// _(TK_INFERRED, "inferred", "") +// _(TK_ACCESS, "access", "") +// _(TK_ASSIGN, "assign", "") +// _(TK_AUG_ASSIGN, "aug_assign", "") +// _(TK_ATTRIBUTE, "attribute", "") +// _(TK_IF, "if", "if") +// _(TK_ELSE, "else", "else") +// _(TK_ELIF, "elif", "elif") +// _(TK_WHILE, "while", "while") +// _(TK_EXPR_STMT, "expression statement", "") +// _(TK_RETURN, "return", "return") +// _(TK_IS, "is", "is") +// _(TK_ISNOT, "is not", "is not") +// _(TK_NE, "ne", "!=") +// _(TK_EQ, "eq", "==") +// _(TK_LE, "le", "<=") +// _(TK_GE, "ge", ">=") +// _(TK_FLOOR_DIV, "floordiv", "//") +// _(TK_IF_EXPR, "if", "") +// _(TK_TRUE, "True", "True") +// _(TK_FALSE, "False", "False") +// _(TK_NONE, "None", "None") +// _(TK_AND, "and", "and") +// _(TK_OR, "or", "or") +// _(TK_NOT, "not", "not") +// _(TK_LSHIFT, "<<", "<<") +// _(TK_RSHIFT, ">>", ">>") +// _(TK_CAST, "cast", "") +// _(TK_PLUS_EQ, "+=", "+=") +// _(TK_MINUS_EQ, "-=", "-=") +// _(TK_TIMES_EQ, "*=", "*=") +// _(TK_DIV_EQ, "/=", "/=") +// _(TK_MOD_EQ, "%=", "%=") +// _(TK_BIT_OR_EQ, "|=", "|=") +// _(TK_BIT_AND_EQ, "&=", "&=") +// _(TK_BIT_XOR_EQ, "^=", "^=") +// 
_(TK_LSHIFT_EQ, "<<=", "<<=") +// _(TK_RSHIFT_EQ, ">>=", ">>=") +// _(TK_POW_EQ, "**=", "**=") +// _(TK_GLOBAL, "global", "global") +// _(TK_BUILT_IN, "built-in", "") +// _(TK_SUBSCRIPT, "subscript", "") +// _(TK_VAR, "variable", "") +// _(TK_NOTHING, "nothing", "") +// _(TK_DICT_LITERAL, "dict-literal", "") +// _(TK_LIST_LITERAL, "list-literal", "") +// _(TK_TUPLE_LITERAL, "tuple-literal", "") +// _(TK_FOR, "for", "for") +// _(TK_IN, "in", "in") +// _(TK_NOTIN, "not in", "not in") +// _(TK_STARRED, "starred", "") +// _(TK_UNARY_MINUS, "unary minus", "") +// _(TK_POW, "pow operator", "**") +// _(TK_ARROW, "arrow", "->") +// _(TK_DECL, "decl", "") +// _(TK_SLICE_EXPR, "slice expr", "") +// _(TK_TYPE_COMMENT, "type comment", "# type:") +// _(TK_RAISE, "raise", "raise") +// _(TK_ASSERT, "assert", "assert") +// _(TK_DOTS, "dots", "...") +// _(TK_LIST_COMP, "list comprehension", "") +// _(TK_DICT_COMP, "dict comprehension", "") +// _(TK_BREAK, "break", "break") +// _(TK_CONTINUE, "continue", "continue") +// _(TK_DELETE, "del", "del") +// _(TK_PASS, "pass", "pass") +// _(TK_CLASS_DEF, "class", "class") +// _(TK_IMPORT, "import", "import") +// _(TK_WITH, "with", "with") +// _(TK_WITH_ITEM, "withitem", "") +// _(TK_AS, "as", "as") +// _(TK_PROP, "property", "") +// _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") +// _(TK_NONE_TYPE, "NoneType", "NoneType") +@Namespace("torch::jit") public enum TokenKind { + // we use characters to represent themselves so skip all valid characters + // before + // assigning enum values to multi-char tokens. + TK_DUMMY_START(256), + TK_EOF(257), + TK_WHITESPACE(258), + TK_WHITESPACE_EOF(259), + TK_NUMBER(260), + TK_NEWLINE(261), + TK_INDENT(262), + TK_DEDENT(263), + TK_DEF(264), + TK_EQUIVALENT(265), + TK_IDENT(266), + TK_STRING(267), + TK_STRINGLITERAL(268), + TK_CONST(269), + TK_LIST(270), + TK_DICT(271), + TK_OPTION(272), + TK_APPLY(273), + TK_COMPREHENSION(274), + TK_RANGE_CONSTRAINT(275), + TK_PARAM(276), + TK_INFERRED(277), + TK_ACCESS(278), + TK_ASSIGN(279), + TK_AUG_ASSIGN(280), + TK_ATTRIBUTE(281), + TK_IF(282), + TK_ELSE(283), + TK_ELIF(284), + TK_WHILE(285), + TK_EXPR_STMT(286), + TK_RETURN(287), + TK_IS(288), + TK_ISNOT(289), + TK_NE(290), + TK_EQ(291), + TK_LE(292), + TK_GE(293), + TK_FLOOR_DIV(294), + TK_IF_EXPR(295), + TK_TRUE(296), + TK_FALSE(297), + TK_NONE(298), + TK_AND(299), + TK_OR(300), + TK_NOT(301), + TK_LSHIFT(302), + TK_RSHIFT(303), + TK_CAST(304), + TK_PLUS_EQ(305), + TK_MINUS_EQ(306), + TK_TIMES_EQ(307), + TK_DIV_EQ(308), + TK_MOD_EQ(309), + TK_BIT_OR_EQ(310), + TK_BIT_AND_EQ(311), + TK_BIT_XOR_EQ(312), + TK_LSHIFT_EQ(313), + TK_RSHIFT_EQ(314), + TK_POW_EQ(315), + TK_GLOBAL(316), + TK_BUILT_IN(317), + TK_SUBSCRIPT(318), + TK_VAR(319), + TK_NOTHING(320), + TK_DICT_LITERAL(321), + TK_LIST_LITERAL(322), + TK_TUPLE_LITERAL(323), + TK_FOR(324), + TK_IN(325), + TK_NOTIN(326), + TK_STARRED(327), + TK_UNARY_MINUS(328), + TK_POW(329), + TK_ARROW(330), + TK_DECL(331), + TK_SLICE_EXPR(332), + TK_TYPE_COMMENT(333), + TK_RAISE(334), + TK_ASSERT(335), + TK_DOTS(336), + TK_LIST_COMP(337), + TK_DICT_COMP(338), + TK_BREAK(339), + TK_CONTINUE(340), + TK_DELETE(341), + TK_PASS(342), + TK_CLASS_DEF(343), + TK_IMPORT(344), + TK_WITH(345), + TK_WITH_ITEM(346), + TK_AS(347), + TK_PROP(348), + TK_ELLIPSIS(349), + TK_NONE_TYPE(350); -// Targeting ../ThresholdImpl.java + public final int value; + private TokenKind(int v) { this.value = v; } + private TokenKind(TokenKind e) { this.value = e.value; } + public TokenKind intern() { for (TokenKind e : values()) if (e.value == 
value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +@Namespace("torch::jit") public static native @StdString BytePointer kindToString(int kind); +@Namespace("torch::jit") public static native int stringToKind(@StdString BytePointer str); +@Namespace("torch::jit") public static native int stringToKind(@StdString String str); -// Targeting ../Threshold.java +// nested hash tables that indicate char-by-char what is a valid token. +// Targeting ../SharedParserData.java -// Targeting ../MultiheadAttentionImpl.java +@Namespace("torch::jit") public static native @ByRef SharedParserData sharedParserData(); +// Targeting ../Token.java -// Targeting ../MultiheadAttention.java +// Targeting ../Lexer.java - // namespace nn + // namespace jit // namespace torch -// Parsed from torch/nn/modules/normalization.h -// #pragma once +// Parsed from torch/csrc/jit/frontend/parser_constants.h -// #include -// #include -// #include -// #include -// #include -// #include +// #pragma once +@Namespace("torch::jit") public static native @Cast("const char*") BytePointer valid_single_char_tokens(); public static native void valid_single_char_tokens(BytePointer setter); + // namespace jit + // namespace torch -// #include -// #include -// Targeting ../LayerNormImpl.java +// Parsed from torch/csrc/jit/frontend/strtod.h -// Targeting ../LayerNorm.java +// #pragma once +// #include -// Targeting ../LocalResponseNormImpl.java +@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") PointerPointer endptr); +@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); +@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); +@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr byte[] endptr); +@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); +@Namespace("torch::jit") public static native double strtod_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); +@Namespace("torch::jit") public static native double strtod_c(String nptr, @Cast("char**") @ByPtrPtr byte[] endptr); +@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") PointerPointer endptr); +@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); +@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); +@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr byte[] endptr); +@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr BytePointer endptr); +@Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); +@Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr byte[] endptr); + // namespace jit + // namespace torch -// Targeting ../LocalResponseNorm.java +// Parsed from torch/csrc/jit/frontend/schema_matching.h -// Targeting ../CrossMapLRN2dImpl.java +// #pragma once +// 
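As a usage sketch of the lexer helpers declared above: a minimal round trip between token kinds and their string forms. It assumes the generated org.bytedeco.pytorch.global.torch class exposes TokenKind, kindToString and stringToKind exactly as shown, with the native library loaded by the class's static initializer:

import org.bytedeco.javacpp.BytePointer;
import static org.bytedeco.pytorch.global.torch.*;

public class TokenKindDemo {
    public static void main(String[] args) {
        // Single-character tokens are represented by the character itself,
        // so kinds below 256 print as that character.
        BytePointer plus = kindToString('+');
        System.out.println(plus.getString()); // "+"

        // Multi-character tokens start above TK_DUMMY_START (256).
        System.out.println(kindToString(TokenKind.TK_DEF.value).getString()); // "def"

        // stringToKind is the inverse mapping for tokens that have a text form.
        int kind = stringToKind("def");
        System.out.println(kind == TokenKind.TK_DEF.value); // true
    }
}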
#include
+// #include
+// #include
+// #include
+// Targeting ../MatchedSchema.java
-// Targeting ../CrossMapLRN2d.java
-// Targeting ../GroupNormImpl.java
+@Namespace("torch::jit") public static native @Cast("bool") boolean isBlockListedSchema(@Const @ByRef FunctionSchema schema);
+
+@Namespace("torch::jit") public static native @ByVal MatchedSchema matchSchema(
+    @Const @ByRef FunctionSchema schema,
+    @Const @ByRef SourceRange loc,
+    @ByRef Graph graph,
+    @ByVal NamedValueArrayRef args,
+    @ByVal NamedValueArrayRef kwargs,
+    @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") NamedValueOptional self);
+@Namespace("torch::jit") public static native @ByVal MatchedSchema matchSchema(
+    @Const @ByRef FunctionSchema schema,
+    @Const @ByRef SourceRange loc,
+    @ByRef Graph graph,
+    @ByVal NamedValueArrayRef args,
+    @ByVal NamedValueArrayRef kwargs);
+
+@Namespace("torch::jit") public static native @ByVal SizeTMatchedSchemaPair matchSchemas(
+    @Const @ByRef FunctionSchemaVector schemas,
+    @Const @ByRef SourceRange loc,
+    @ByRef Graph graph,
+    @ByVal NamedValueArrayRef args,
+    @ByVal NamedValueArrayRef kwargs,
+    @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") NamedValueOptional self,
+    @Cast("bool") boolean render_errors/*=false*/);
+@Namespace("torch::jit") public static native @ByVal SizeTMatchedSchemaPair matchSchemas(
+    @Const @ByRef FunctionSchemaVector schemas,
+    @Const @ByRef SourceRange loc,
+    @ByRef Graph graph,
+    @ByVal NamedValueArrayRef args,
+    @ByVal NamedValueArrayRef kwargs);
+
+@Namespace("torch::jit") public static native @Cast("bool") boolean convertibleToList(
+    @Const @ByRef Type.TypePtr type,
+    @Const @ByRef Type.TypePtr list_type_);
+
+@Namespace("torch::jit") public static native @StdString BytePointer getFullSchemaName(@Const @ByRef FunctionSchema schema);
+
+@Namespace("torch::jit") public static native Value emitBuiltinCall(
+    @Const @ByRef SourceRange loc,
+    @ByRef Graph graph,
+    @ByVal Symbol name,
+    @ByVal NamedValueArrayRef args,
+    @ByVal NamedValueArrayRef kwargs,
+    @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") NamedValueOptional self);
+@Namespace("torch::jit") public static native Value emitBuiltinCall(
+    @Const @ByRef SourceRange loc,
+    @ByRef Graph graph,
+    @ByVal Symbol name,
+    @ByVal NamedValueArrayRef args,
+    @ByVal NamedValueArrayRef kwargs);
+
+@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName(
+    @StdString BytePointer name,
+    @ByVal NamedValueArrayRef kwargs,
+    @Cast("bool") boolean is_aten/*=false*/);
+@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName(
+    @StdString BytePointer name,
+    @ByVal NamedValueArrayRef kwargs);
+@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName(
+    @StdString String name,
+    @ByVal NamedValueArrayRef kwargs,
+    @Cast("bool") boolean is_aten/*=false*/);
+@Namespace("torch::jit") public static native @ByVal SizeTOptional findInputWithName(
+    @StdString String name,
+    @ByVal NamedValueArrayRef kwargs);
+
+// Applies implicit conversions to `value`, trying to turn it into type
+// `concrete_type`; it succeeds if return_value->isSubtypeOf(concrete_type).
+@Namespace("torch::jit") public static native Value tryConvertToType(
+    @Const @ByRef SourceRange loc,
+    @ByRef Graph graph,
+    @Const @ByRef Type.TypePtr concrete_type,
+    Value value,
+    @Cast("bool") boolean allow_conversions);
+  // namespace jit
+  // namespace torch
-// Targeting ../GroupNorm.java
+// Parsed from torch/csrc/jit/frontend/versioned_symbols.h
+//
#pragma once +// #include +// #include +// #include - // namespace nn +// #include +// Maps the given symbol into an implementation of its behavior at the +// given version. +// See note [Versioned Symbols] +@Namespace("torch::jit") public static native @ByVal Symbol get_symbol_for_version(@Const @ByVal Symbol name, @Cast("const uint64_t") long version); + +// Maps the given kind to the minimum version that supports it. +// See note [Dynamic Versions and torch.jit.save vs. torch.save] +@Namespace("torch::jit") public static native @Cast("uint64_t") long get_min_version_for_kind(@Cast("const torch::jit::NodeKind*") @ByRef Symbol kind); + // namespace jit // namespace torch -// Parsed from torch/nn/modules/transformerlayer.h +// Parsed from torch/csrc/jit/frontend/tree_views.h // #pragma once +// #include +// #include +// #include +// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include - -// #include -// Targeting ../TransformerEncoderLayerImpl.java - +// #include +// #include +// #include +// #include +// #include +// Targeting ../TreeView.java -// Targeting ../TransformerEncoderLayer.java +// Targeting ../ExprListIterator.java -// Targeting ../TransformerDecoderLayerImpl.java +// Targeting ../StmtListIterator.java -// Targeting ../TransformerDecoderLayer.java +// Targeting ../WithItemListIterator.java - // namespace nn - // namespace torch +// Targeting ../PropertyListIterator.java -// Parsed from torch/nn/modules/transformercoder.h +// Targeting ../AssignListIterator.java -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../ParamListIterator.java -// #include -// #include -// Targeting ../TransformerEncoderImpl.java +// Targeting ../IdentListIterator.java -// Targeting ../TransformerEncoder.java +// Targeting ../AttributeListIterator.java -// Targeting ../TransformerDecoderImpl.java +// Targeting ../ExprList.java -// Targeting ../TransformerDecoder.java +// Targeting ../StmtList.java +// Targeting ../WithItemList.java - // namespace nn - // namespace torch +// Targeting ../PropertyList.java -// Parsed from torch/nn/modules/transformer.h -// #pragma once +// Targeting ../AssignList.java -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../ParamList.java -// #include -// Targeting ../TransformerImpl.java +// Targeting ../IdentList.java -// Targeting ../Transformer.java +// Targeting ../AttributeList.java - // namespace nn - // namespace torch +// Targeting ../DefMaybe.java -// Parsed from torch/optim.h +// Targeting ../ExprMaybe.java -// #pragma once -// #include -// #include -// #include -// #include -// #include -// #include -// #include +// Targeting ../VarMaybe.java -// #include -// #include +// Targeting ../PropertyListMaybe.java -// Parsed from torch/optim/optimizer.h -// #pragma once +// Targeting ../AssignListMaybe.java -// #include -// #include -// #include -// #include -// #include +// Targeting ../Ident.java -// #include -// #include -// #include -// #include -// #include -// #include -// Forward declarations confuse Doxygen -// #ifndef DOXYGEN_SHOULD_SKIP_THIS - // namespace at - // namespace serialize +// Targeting ../Stmt.java -// Targeting ../OptimizerParamState.java +// Targeting ../Expr.java -// Targeting ../OptimizerCloneableAdagradParamState.java +// Targeting ../Attribute.java -// Targeting ../OptimizerCloneableAdamParamState.java +// Targeting ../Param.java -// Targeting 
../OptimizerCloneableAdamWParamState.java +// Targeting ../Decl.java -// Targeting ../OptimizerCloneableLBFGSParamState.java +// Targeting ../Def.java -// Targeting ../OptimizerCloneableRMSpropParamState.java +// Targeting ../Property.java -// Targeting ../OptimizerCloneableSGDParamState.java +// Targeting ../ClassDef.java -// Targeting ../OptimizerOptions.java -// Targeting ../OptimizerCloneableAdagradOptions.java +// Targeting ../If.java -// Targeting ../OptimizerCloneableAdamOptions.java +// Targeting ../While.java -// Targeting ../OptimizerCloneableAdamWOptions.java +// Targeting ../For.java -// Targeting ../OptimizerCloneableLBFGSOptions.java +// Targeting ../ListComp.java -// Targeting ../OptimizerCloneableRMSpropOptions.java +// Targeting ../DictComp.java -// Targeting ../OptimizerCloneableSGDOptions.java +// Targeting ../Global.java -// Targeting ../OptimizerParamGroup.java +// Targeting ../AugAssignKind.java -// Targeting ../Optimizer.java +// Targeting ../AugAssign.java -/* How do we decide whether to serialize undefined tensors or - c10::nullopt values into the output archive? -Answer: we strictly follow the behavior of Python API. To be more specific: +// Targeting ../Assign.java -For optimizer options: -a) For undefined tensor: currently no tensor is used as an options argument in -Python API, so we don't need to worry about it now. b) For c10::nullopt value: -we serialize c10::nullopt values into the output archive, to follow the exact -same behavior as Python API. -For optimizer param state: -a) For undefined tensor: in param state, undefined tensor in C++ impl is -equivalent to missing key in Python impl. Since we don't serialize missing keys -in Python API, we skip undefined tensors when serializing the param state. b) -For c10::nullopt value: in param state, c10::nullopt value in C++ impl is -equivalent to missing key in Python impl. Since we don't serialize missing keys -in Python API, we skip c10::nullopt values when serializing the param state. */ +// Targeting ../Return.java -/** Serializes an {@code Optimizer} into an {@code OutputArchive}. */ -@Namespace("torch::optim") public static native @ByRef @Name("operator <<") OutputArchive shiftLeft( - @ByRef OutputArchive archive, - @Const @ByRef Optimizer optimizer); -/** Deserializes a {@code Tensor} from an {@code InputArchive}. 
*/ -@Namespace("torch::optim") public static native @ByRef @Name("operator >>") InputArchive shiftRight( - @ByRef InputArchive archive, - @ByRef Optimizer optimizer); +// Targeting ../Raise.java - // namespace optim - // namespace torch +// Targeting ../Assert.java -// Parsed from torch/optim/serialize.h -// #pragma once +// Targeting ../Pass.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// Utility function to save state +// Targeting ../Dots.java -// Utility function to load state +// Targeting ../Break.java -// Utility function to save param_groups +// Targeting ../Continue.java -// Utility function to load param_groups -// We take as input vector of pair of string and unique_ptr to optimizer options -// so that we can retain the state for each param by using the old tensor impl -// keys (saved during serialization) and map the new tensor impl keys to the -// correct state for each param - // namespace detail +// Targeting ../ExprStmt.java -// Note: These functions are all called `serialize()` so they can be called -// inside a template where the archive type is a template type and can thus be -// passed such that the appropriate overload is selected. -/** Utility function to save a value of {@code int64_t} type. */ +// Targeting ../BinOp.java -/** Utility function to load a value of {@code int64_t} type. */ +// Targeting ../UnaryOp.java -/** Utility function to save a vector of step buffers. */ +// Targeting ../ConstExpr.java -/** Utility function to load a vector of step buffers. */ +// Targeting ../StringLiteral.java -// Utility function to save state and param_groups +// Targeting ../Apply.java -// Utility function to load state and param_groups and update state +// Targeting ../Select.java -/** Utility function to save a vector of buffers. */ +// Targeting ../SliceExpr.java -/** Utility function to load a vector of buffers. 
*/ +// Targeting ../Subscript.java -// #define _TORCH_OPTIM_SERIALIZE(name) -// torch::optim::serialize(archive, #name, self.name) +// Targeting ../Var.java -// #define _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(OptimizerName) -// torch::optim::serialize( -// archive, self) -// #define _TORCH_OPTIM_SERIALIZE_TORCH_ARG(name) -// { -// auto ivalue = torch::IValue(name()); -// /* do not serialize if name is an undefined tensor*/ -// if (!(ivalue.isTensor() && -// ivalue.nsafeToTensorImpl() == -// at::UndefinedTensorImpl::singleton())) { -// archive.write(#name, ivalue); -// } -// } +// Targeting ../WithItem.java -// #define _TORCH_OPTIM_SERIALIZE_TORCH_ARG_DEQUE(name) -// { -// c10::IValue ivalue = torch::IValue(deque_to_list(name())); -// archive.write(#name, ivalue); -// } -// #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(T, name) -// { -// c10::IValue ivalue; -// bool exists = archive.try_read(#name, ivalue); -// if (exists) { -// name(ivalue.to()); -// } else { -// bool is_tensor_type = std::is_base_of::value; -// TORCH_INTERNAL_ASSERT(is_tensor_type); -// } -// } +// Targeting ../With.java -// #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG_OPTIONAL(T, name) -// { -// c10::IValue ivalue; -// bool exists = archive.try_read(#name, ivalue); -// if (exists) { -// name(ivalue.toOptional()); -// } -// } -// #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG_DEQUE(T, name) -// { -// c10::IValue ivalue; -// archive.read(#name, ivalue); -// auto list = ivalue.to>(); -// name(list_to_deque(list)); -// } +// Targeting ../TernaryIf.java - // namespace optim - // namespace torch +// Targeting ../ListLiteral.java -// Parsed from torch/optim/adagrad.h -// #pragma once +// Targeting ../TupleLiteral.java -// #include -// #include -// #include -// #include -// #include -// #include -// #include - // namespace serialize +// Targeting ../DictLiteral.java -// Targeting ../AdagradOptions.java +// Targeting ../Starred.java -// Targeting ../AdagradParamState.java +// Targeting ../Delete.java -// Targeting ../Adagrad.java - // namespace optim + // namespace jit // namespace torch + // namespace std -// Parsed from torch/optim/adam.h -// #pragma once +// Parsed from torch/csrc/jit/serialization/pickler.h -// #include -// #include -// #include +// #pragma once +// #include +// #include // #include // #include - // namespace serialize -// Targeting ../AdamOptions.java +// #include +// #include +// #include +// #include +// #include + +// See Python's pickletools.py for a detailed description of each of these codes +@Namespace("torch::jit") public enum PickleOpCode { + MARK((byte)('(')), + STOP((byte)('.')), + POP((byte)('0')), + POP_MARK((byte)('1')), + DUP((byte)('2')), + FLOAT((byte)('F')), + INT((byte)('I')), + BININT((byte)('J')), + BININT1((byte)('K')), + LONG((byte)('L')), + BININT2((byte)('M')), + NONE((byte)('N')), + PERSID((byte)('P')), + BINPERSID((byte)('Q')), + REDUCE((byte)('R')), + STRING((byte)('S')), + BINSTRING((byte)('T')), + SHORT_BINSTRING((byte)('U')), + // NB: Avoid using UNICODE as it is a macro in the Windows API + UNICODE_((byte)('V')), + BINUNICODE((byte)('X')), + APPEND((byte)('a')), + BUILD((byte)('b')), + GLOBAL((byte)('c')), + DICT((byte)('d')), + EMPTY_DICT((byte)('}')), + APPENDS((byte)('e')), + GET((byte)('g')), + BINGET((byte)('h')), + INST((byte)('i')), + LONG_BINGET((byte)('j')), + LIST((byte)('l')), + EMPTY_LIST((byte)(']')), + OBJ((byte)('o')), + PUT((byte)('p')), + BINPUT((byte)('q')), + LONG_BINPUT((byte)('r')), + SETITEM((byte)('s')), + TUPLE((byte)('t')), + EMPTY_TUPLE((byte)(')')), + 
SETITEMS((byte)('u')),
+    BINFLOAT((byte)('G')),
+    // Protocol 2
+    PROTO((byte)(0x80)),
+    NEWOBJ((byte)(0x81)),
+    EXT1((byte)(0x82)),
+    EXT2((byte)(0x83)),
+    EXT4((byte)(0x84)),
+    TUPLE1((byte)(0x85)),
+    TUPLE2((byte)(0x86)),
+    TUPLE3((byte)(0x87)),
+    NEWTRUE((byte)(0x88)),
+    NEWFALSE((byte)(0x89)),
+    LONG1((byte)(0x8a)),
+    LONG4((byte)(0x8b)),
-// Targeting ../AdamParamState.java
+    // Protocol 3 (Python 3.x)
+    BINBYTES((byte)('B')),
+    SHORT_BINBYTES((byte)('C')),
+    // Protocol 4
+    SHORT_BINUNICODE((byte)(0x8c)),
+    BINUNICODE8((byte)(0x8d)),
+    BINBYTES8((byte)(0x8e)),
+    EMPTY_SET((byte)(0x8f)),
+    ADDITEMS((byte)(0x90)),
+    FROZENSET((byte)(0x91)),
+    NEWOBJ_EX((byte)(0x92)),
+    STACK_GLOBAL((byte)(0x93)),
+    MEMOIZE((byte)(0x94)),
+    FRAME((byte)(0x95));
-// Targeting ../Adam.java
+    public final byte value;
+    private PickleOpCode(byte v) { this.value = v; }
+    private PickleOpCode(PickleOpCode e) { this.value = e.value; }
+    public PickleOpCode intern() { for (PickleOpCode e : values()) if (e.value == value) return e; return this; }
+    @Override public String toString() { return intern().name(); }
+}
+// Targeting ../WriteableTensorData.java
-  // namespace optim
-  // namespace torch
-// Parsed from torch/optim/adamw.h
-// #pragma once
+// Targeting ../Pickler.java
-// #include
-// #include
-// #include
-// #include
-// #include
-  // namespace serialize
-// Targeting ../AdamWOptions.java
+// returns a (tensor, record_size) for a tensor, converting it to a CPU tensor
+// if it was CUDA and to_cpu is true.
+@Namespace("torch::jit") public static native @ByVal WriteableTensorData getWriteableTensorData(@Const @ByRef Tensor tensor, @Cast("bool") boolean to_cpu/*=true*/);
+@Namespace("torch::jit") public static native @ByVal WriteableTensorData getWriteableTensorData(@Const @ByRef Tensor tensor);
+// return the value of the tensor's storage pointer
-// Targeting ../AdamWParamState.java
+// if the cls has __getstate__/__setstate__,
+// assert they have the right schema and return true;
+// otherwise return false
-// Targeting ../AdamW.java
+// Return a map of Tensor Metadata for serialization.
+// For now, it only takes care of the `conj` and `neg` bits.
+@Namespace("torch::jit") public static native @ByVal StringBoolMap getTensorMetadata(
+    @Const @ByRef Tensor t);
-  // namespace optim
+// set Tensor Metadata based on the map.
+// Refer: getTensorMetadata
+@Namespace("torch::jit") public static native void setTensorMetadata(
+    @Const @ByRef Tensor t,
+    @ByVal StringBoolMap metadata);
+
+// set Tensor metadata based on the map.
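A hedged usage sketch of the two metadata helpers above, before the GenericDict overload that follows. It assumes the varargs ones(long...) factory and the usual size() accessor on the generated StringBoolMap wrapper; for an ordinary real CPU tensor neither the conj nor the neg bit is set, so the map comes back empty:

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class TensorMetadataDemo {
    public static void main(String[] args) {
        Tensor t = ones(2, 2);                     // plain CPU tensor (assumed varargs factory)
        StringBoolMap meta = getTensorMetadata(t); // only the `conj`/`neg` bits are reported
        System.out.println("metadata entries: " + meta.size()); // 0 for a plain real tensor
        setTensorMetadata(t, meta);                // writing the same map back is a no-op
    }
}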
+// NOTE: This overload is required by unpickler.cpp +@Namespace("torch::jit") public static native void setTensorMetadata( + @Const @ByRef Tensor t, + @ByVal GenericDict metadata_idict); + + // namespace jit // namespace torch -// Parsed from torch/optim/lbfgs.h +// Parsed from torch/torch.h // #pragma once -// #include -// #include -// #include -// #include +// #include -// #include -// #include -// #include -// #include -// Targeting ../LBFGSOptions.java +// #ifdef TORCH_API_INCLUDE_EXTENSION_H +// #include +// #endif // defined(TORCH_API_INCLUDE_EXTENSION_H) -// Targeting ../LBFGSParamState.java +// Parsed from ATen/native/TensorShape.h -// Targeting ../LBFGS.java +// #pragma once +// #include +// #include +// #include +@Namespace("at::native") public static native @ByVal Tensor clone_preserve_strides(@Const @ByRef Tensor self); - // namespace optim - // namespace torch +@Namespace("at::native") public static native @Cast("bool") boolean cat_should_skip_tensor(@Const @ByRef Tensor t); + // Check to see if the shape of tensors is compatible + // for being concatenated along a given dimension. +@Namespace("at::native") public static native void check_cat_shape_except_dim(@Const @ByRef Tensor first, @Const @ByRef Tensor second, @Cast("int64_t") long dimension, @Cast("int64_t") long index); -// Parsed from torch/optim/rmsprop.h +@Namespace("at::native") public static native @Cast("int64_t") long get_num_splits(@Const @ByRef Tensor self, @Cast("int64_t") long split_size, @Cast("int64_t") long dim); -// #pragma once + // namespace at::native -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - // namespace serialize +// Parsed from torch/csrc/jit/serialization/storage_context.h -// Targeting ../RMSpropOptions.java +// #pragma once +// #include +// Targeting ../SerializationStorageContext.java -// Targeting ../RMSpropParamState.java +// Targeting ../DeserializationStorageContext.java -// Targeting ../RMSprop.java - // namespace optim + // namespace jit // namespace torch -// Parsed from torch/optim/sgd.h +// Parsed from torch/csrc/jit/serialization/import.h // #pragma once -// #include -// #include -// #include -// #include -// #include +// #include +// #include +// #include +// #include +// #include -// #include -// #include -// #include +// #include // namespace serialize + // namespace caffe2 -// Targeting ../SGDOptions.java +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString BytePointer filename, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString BytePointer filename); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString String filename, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString String filename); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @Cast("std::istream*") @ByRef Pointer in, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean 
load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @Cast("std::istream*") @ByRef Pointer in); -// Targeting ../SGDParamState.java +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @UniquePtr ReadAdapterInterface rai, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @UniquePtr ReadAdapterInterface rai); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString BytePointer filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/, + @Cast("bool") boolean restore_shapes/*=false*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString BytePointer filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString String filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/, + @Cast("bool") boolean restore_shapes/*=false*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @StdString String filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); -// Targeting ../SGD.java +// For reading unified serialization format from torch.Package +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @ByVal @Cast("std::shared_ptr*") Pointer reader, + @SharedPtr DeserializationStorageContext storage_context, + @ByVal DeviceOptional device, + @StdString BytePointer ts_id); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @ByVal @Cast("std::shared_ptr*") Pointer reader, + @SharedPtr DeserializationStorageContext storage_context, + @ByVal DeviceOptional device, + @StdString String ts_id); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @Cast("std::istream*") @ByRef Pointer in, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/, + @Cast("bool") boolean restore_shapes/*=false*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @Cast("std::istream*") @ByRef Pointer in, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); - // namespace optim - // namespace torch +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @UniquePtr ReadAdapterInterface rai, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( + @SharedPtr CompilationUnit cu, + @UniquePtr ReadAdapterInterface rai, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); +/** Loads a serialized {@code Module} from the given {@code 
istream}. + * + * The istream must contain a serialized {@code Module}, exported via + * {@code torch::jit::ExportModule} in C++. */ +@Namespace("torch::jit") public static native @ByVal JitModule load( + @Cast("std::istream*") @ByRef Pointer in, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @Cast("std::istream*") @ByRef Pointer in); -// Parsed from torch/optim/schedulers/lr_scheduler.h -// #pragma once +/// +@Namespace("torch::jit") public static native @ByVal JitModule load( + @Cast("std::istream*") @ByRef Pointer in, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @Cast("std::istream*") @ByRef Pointer in, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); -// #include +/** Loads a serialized {@code Module} from the given {@code filename}. + * + * The file stored at the location given in {@code filename} must contain a + * serialized {@code Module}, exported either via {@code ScriptModule.save()} in + * Python or {@code torch::jit::ExportModule} in C++. */ +@Namespace("torch::jit") public static native @ByVal JitModule load( + @StdString BytePointer filename, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @StdString BytePointer filename); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @StdString String filename, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @StdString String filename); -// #include -// Targeting ../LRScheduler.java +/// +@Namespace("torch::jit") public static native @ByVal JitModule load( + @StdString BytePointer filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @StdString BytePointer filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @StdString String filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @StdString String filename, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); - // namespace optim - // namespace torch +/** Loads a serialized {@code Module} from the given shared_ptr {@code rai}. + * + * The reader adapter, which is for customized input stream, must contain a + * serialized {@code Module}, exported either via {@code ScriptModule.save()} in + * Python or {@code torch::jit::ExportModule} in C++. 
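As a minimal usage sketch of the load() overloads declared here (the path is a placeholder, and it is assumed that JitModule maps torch::jit::Module's eval() as usual):

import org.bytedeco.pytorch.JitModule;
import static org.bytedeco.pytorch.global.torch.*;

public class LoadModuleDemo {
    public static void main(String[] args) {
        // "traced_model.pt" is a placeholder: any archive produced by
        // torch.jit.save()/ScriptModule.save() in Python, or ExportModule in C++.
        JitModule module = load("traced_model.pt");
        module.eval(); // assumed mapping of torch::jit::Module::eval()
    }
}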
*/ +@Namespace("torch::jit") public static native @ByVal JitModule load( + @SharedPtr ReadAdapterInterface rai, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @SharedPtr ReadAdapterInterface rai); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @SharedPtr ReadAdapterInterface rai, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files, + @Cast("bool") boolean load_debug_files/*=true*/); +@Namespace("torch::jit") public static native @ByVal JitModule load( + @SharedPtr ReadAdapterInterface rai, + @ByVal DeviceOptional device, + @ByRef ExtraFilesMap extra_files); -// Parsed from torch/optim/schedulers/step_lr.h +@Namespace("torch::jit") public static native @ByVal JitModule jitModuleFromSourceAndConstants( + @Const @ByRef IValue ivalue, + @Const @ByRef ExtraFilesMap source, + @Const @ByRef IValueVector constants, + int version); -// #pragma once +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr BytePointer data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr BytePointer data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr ByteBuffer data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr ByteBuffer data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr byte[] data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( + @Cast("char*") @SharedPtr byte[] data, + @Cast("size_t") long size, + @ByRef ExtraFilesMap extra_files); -// #include -// Targeting ../StepLR.java +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( + @StdString BytePointer filename, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( + @StdString BytePointer filename, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( + @StdString String filename, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( + @StdString String filename, + @ByRef ExtraFilesMap extra_files); +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_stream( + @Cast("std::istream*") @ByRef Pointer in, + @ByRef ExtraFilesMap extra_files, + @ByVal(nullValue = 
"c10::optional(c10::nullopt)") DeviceOptional device); +@Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_stream( + @Cast("std::istream*") @ByRef Pointer in, + @ByRef ExtraFilesMap extra_files); - // namespace optim + // namespace jit // namespace torch diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java new file mode 100644 index 00000000000..1b188be3648 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -0,0 +1,777 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.global; + +import org.bytedeco.pytorch.cuda.*; + +import org.bytedeco.pytorch.*; +import org.bytedeco.pytorch.Error; +import org.bytedeco.pytorch.global.torch.DeviceType; +import org.bytedeco.pytorch.global.torch.ScalarType; +import org.bytedeco.pytorch.global.torch.MemoryFormat; +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; + +public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { + static { Loader.load(); } +@Namespace("at") public static native @ByVal @Name("make_generator") Generator make_generator_cuda(); +@Namespace("at") public static native @ByVal @Name("make_generator") Generator make_generator_cuda(@Cast("int8_t&&") byte device_index); + + +// Targeting ../cuda/CUDAStreamOptional.java + + +// Targeting ../cuda/DeviceAssertionsDataVector.java + + +// Targeting ../cuda/CUDAKernelLaunchInfoVector.java + + +// Targeting ../cuda/DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair.java + + +// Parsed from c10/util/ArrayRef.h + +//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +// ATen: modified from llvm::ArrayRef. +// removed llvm-specific functionality +// removed some implicit const -> non-const conversions that rely on +// complicated std::enable_if meta-programming +// removed a bunch of slice variants for simplicity... + +// #pragma once + +// #include +// #include +// #include +// #include + +// #include +// #include +// #include +// Targeting ../cuda/CUDAStreamArrayRef.java + + + +/** \name ArrayRef Convenience constructors + * \{ +

+ * Construct an ArrayRef from a single element. */ + +/** Construct an ArrayRef from a pointer and length. */ + +/** Construct an ArrayRef from a range. */ + +/** Construct an ArrayRef from a SmallVector. */ + +/** Construct an ArrayRef from a SmallVector. */ + +/** Construct an ArrayRef from a std::vector. */ + +/** Construct an ArrayRef from a std::array. */ + +/** Construct an ArrayRef from an ArrayRef (no-op) (const) */ + +/** Construct an ArrayRef from an ArrayRef (no-op) */ + +/** Construct an ArrayRef from a C array. */ + +// WARNING: Template instantiation will NOT be willing to do an implicit +// conversions to get you to an c10::ArrayRef, which is why we need so +// many overloads. + +// This alias is deprecated because it doesn't make ownership +// semantics obvious. Use IntArrayRef instead! + // namespace c10 + + +// Parsed from c10/cuda/CUDAStream.h + +// #pragma once + +// #include +// #include + +// #include + +// #include +// #include +// #include +// #include + +/* + * Stream pool note. + * + * A CUDAStream is an abstraction of an actual cuStream on the GPU. CUDAStreams + * are backed by cuStreams, but they use several pools to minimize the costs + * associated with creating, retaining, and destroying cuStreams. + * + * There are three pools per device, and a device's pools are lazily created. + * + * The first pool contains only the default stream. When the default stream + * is requested it's returned. + * + * The second pool is the "low priority" or "default priority" streams. In + * HIP builds there is no distinction between streams in this pool and streams + * in the third pool (below). There are 32 of these streams per device, and + * when a stream is requested one of these streams is returned round-robin. + * That is, the first stream requested is at index 0, the second at index 1... + * to index 31, then index 0 again. + * + * This means that if 33 low priority streams are requested, the first and + * last streams requested are actually the same stream (under the covers) + * and kernels enqueued on them cannot run concurrently. + * + * The third pool is the "high priority" streams. The third pool acts like + * the second pool except the streams are created with a higher priority. + * + * These pools suggest that stream users should prefer many short-lived streams, + * as the cost of acquiring and releasing streams is effectively zero. If + * many longer-lived streams are required in performance critical scenarios + * then the functionality here may need to be extended to allow, for example, + * "reserving" a subset of the pool so that other streams do not accidentally + * overlap the performance critical streams. + * + * Note: although the notion of "current stream for device" is thread local + * (every OS thread has a separate current stream, as one might expect), + * the stream pool is global across all threads; stream 0 is always stream 0 + * no matter which thread you use it on. Multiple threads can synchronize + * on the same stream. Although the CUDA documentation is not very clear + * on the matter, streams are thread safe; e.g., it is safe to enqueue + * a kernel on the same stream from two different threads. + */ +// Targeting ../cuda/CUDAStream.java + + + +/** + * Get a new stream from the CUDA stream pool. You can think of this + * as "creating" a new stream, but no such creation actually happens; + * instead, streams are preallocated from the pool and returned in a + * round-robin fashion. 
+ * + * You can request a stream from the high priority pool by setting + * isHighPriority to true, or a stream for a specific device by setting device + * (defaulting to the current CUDA stream.) + */ +@Namespace("c10::cuda") public static native @ByVal CUDAStream getStreamFromPool(@Cast("const bool") boolean isHighPriority/*=false*/, byte device/*=-1*/); +@Namespace("c10::cuda") public static native @ByVal CUDAStream getStreamFromPool(); + +/** + * Get a CUDAStream from a externally allocated one. + * + * This is mainly for interoperability with different libraries where we + * want to operate on a non-torch allocated stream for data exchange or similar + * purposes + */ +@Namespace("c10::cuda") public static native @ByVal CUDAStream getStreamFromExternal(@Cast("cudaStream_t") Pointer ext_stream, byte device_index); + +/** + * Get the default CUDA stream, for the passed CUDA device, or for the + * current device if no device index is passed. The default stream is + * where most computation occurs when you aren't explicitly using + * streams. + */ +@Namespace("c10::cuda") public static native @ByVal CUDAStream getDefaultCUDAStream(byte device_index/*=-1*/); +@Namespace("c10::cuda") public static native @ByVal CUDAStream getDefaultCUDAStream(); + +/** + * Get the current CUDA stream, for the passed CUDA device, or for the + * current device if no device index is passed. The current CUDA stream + * will usually be the default CUDA stream for the device, but it may + * be different if someone called 'setCurrentCUDAStream' or used 'StreamGuard' + * or 'CUDAStreamGuard'. + */ +@Namespace("c10::cuda") public static native @ByVal CUDAStream getCurrentCUDAStream(byte device_index/*=-1*/); +@Namespace("c10::cuda") public static native @ByVal CUDAStream getCurrentCUDAStream(); + +/** + * Set the current stream on the device of the passed in stream to be + * the passed in stream. Yes, you read that right: this function + * has *nothing* to do with the current device: it toggles the current + * stream of the device of the passed stream. + * + * Confused? Avoid using this function; prefer using 'CUDAStreamGuard' instead + * (which will switch both your current device and current stream in the way you + * expect, and reset it back to its original state afterwards). + */ +@Namespace("c10::cuda") public static native void setCurrentCUDAStream(@ByVal CUDAStream stream); + +@Namespace("c10::cuda") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @Const @ByRef CUDAStream s); + + // namespace cuda + // namespace c10 + // namespace std + + +// Parsed from ATen/cuda/CUDAContext.h + +// #pragma once + +// #include + +// #include +// #include +// #include + +// #ifdef CUDART_VERSION +// #include +// #endif + +// #include +// #include +// #include +// #include +// #include + +/* +A common CUDA interface for ATen. + +This interface is distinct from CUDAHooks, which defines an interface that links +to both CPU-only and CUDA builds. That interface is intended for runtime +dispatch and should be used from files that are included in both CPU-only and +CUDA builds. + +CUDAContext, on the other hand, should be preferred by files only included in +CUDA builds. It is intended to expose CUDA functionality in a consistent +manner. + +This means there is some overlap between the CUDAContext and CUDAHooks, but +the choice of which to use is simple: use CUDAContext when in a CUDA-only file, +use CUDAHooks otherwise. 
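A minimal sketch of the stream-pool API above, assuming a CUDA-enabled build with at least one device and the generated org.bytedeco.pytorch.global.torch_cuda class loaded:

import org.bytedeco.pytorch.cuda.CUDAStream;
import static org.bytedeco.pytorch.global.torch_cuda.*;

public class StreamPoolDemo {
    public static void main(String[] args) {
        // Borrow a high-priority stream for device 0; nothing is created,
        // the stream comes from the preallocated pool described above.
        CUDAStream s = getStreamFromPool(/*isHighPriority=*/ true, /*device=*/ (byte) 0);
        setCurrentCUDAStream(s);        // work on device 0 now targets this stream
        CUDAStream current = getCurrentCUDAStream((byte) 0); // returns the pooled stream
        System.out.println(current);
        setCurrentCUDAStream(getDefaultCUDAStream((byte) 0)); // restore the default
    }
}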
+
+Note that CUDAContext simply defines an interface with no associated class.
+It is expected that the modules whose functions compose this interface will
+manage their own state. There is only a single CUDA context/state.
+*/
+
+/**
+ * DEPRECATED: use device_count() instead
+ */
+@Namespace("at::cuda") public static native @Cast("int64_t") long getNumGPUs();
+
+/**
+ * CUDA is available if we compiled with CUDA, and there are one or more
+ * devices. If we compiled with CUDA but there is a driver problem, etc.,
+ * this function will report CUDA is not available (rather than raise an error).
+ */
+@Namespace("at::cuda") public static native @Cast("bool") boolean is_available();
+
+@Namespace("at::cuda") public static native Pointer getCurrentDeviceProperties();
+
+@Namespace("at::cuda") public static native int warp_size();
+
+@Namespace("at::cuda") public static native Pointer getDeviceProperties(@Cast("int64_t") long device);
+
+@Namespace("at::cuda") public static native @Cast("bool") boolean canDeviceAccessPeer(
+    @Cast("int64_t") long device,
+    @Cast("int64_t") long peer_device);
+
+@Namespace("at::cuda") public static native Allocator getCUDADeviceAllocator();
+
+/* Handles */
+@Namespace("at::cuda") public static native @Cast("cusparseHandle_t") Pointer getCurrentCUDASparseHandle();
+@Namespace("at::cuda") public static native @Cast("cublasHandle_t") Pointer getCurrentCUDABlasHandle();
+
+@Namespace("at::cuda") public static native void clearCublasWorkspaces();
+
+// #ifdef CUDART_VERSION
+@Namespace("at::cuda") public static native @Cast("cusolverDnHandle_t") Pointer getCurrentCUDASolverDnHandle();
+// #endif
+
+  // namespace cuda
+  // namespace at
+
+
+// Parsed from c10/core/impl/GPUTrace.h
+
+// #pragma once
+
+// #include
+
+  // namespace impl
+  // namespace c10
+
+
+// Parsed from c10/cuda/CUDADeviceAssertionHost.h
+
+// #pragma once
+
+// #include
+
+// #include
+// #include
+// #include
+// #include
+
+// #ifdef USE_CUDA
+// #define TORCH_USE_CUDA_DSA
+// #endif
+
+/** Number of assertion failure messages we can store. If this is too small
+ *  threads will fail silently. */
+@MemberGetter public static native int C10_CUDA_DSA_ASSERTION_COUNT();
+@MemberGetter public static native int C10_CUDA_DSA_MAX_STR_LEN();
+// Targeting ../cuda/DeviceAssertionData.java
+
+
+// Targeting ../cuda/DeviceAssertionsData.java
+
+
+// Targeting ../cuda/CUDAKernelLaunchInfo.java
+
+
+// Targeting ../cuda/CUDAKernelLaunchRegistry.java
+
+
+
+
+
+  // namespace cuda
+  // namespace c10
+
+// Each kernel launched with TORCH_DSA_KERNEL_LAUNCH
+// requires the same input arguments. We introduce the following macro to
+// standardize these.
+// #define TORCH_DSA_KERNEL_ARGS
+//   [[maybe_unused]] c10::cuda::DeviceAssertionsData *const assertions_data,
+//   [[maybe_unused]] uint32_t assertion_caller_id
+
+// This macro can be used to pass the DSA arguments onward to another
+// function
+// #define TORCH_DSA_KERNEL_ARGS_PASS assertions_data, assertion_caller_id
+
+
+// Parsed from c10/cuda/CUDAMacros.h
+
+// #pragma once
+
+// #ifndef C10_USING_CUSTOM_GENERATED_MACROS
+
+// We have not yet modified the AMD HIP build to generate this file so
+// we add an extra option to specifically ignore it.
+// #ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE
+// #include
+// #endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE
+
+// #endif
+
+// See c10/macros/Export.h for a detailed explanation of what the function
+// of these macros is. We need one set of macros for every separate library
+// we build.
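A small sketch of the ATen CUDA context queries declared earlier in this header (getNumGPUs(), is_available(), warp_size()), via org.bytedeco.pytorch.global.torch_cuda. is_available() is the safe guard to call first, since it reports false instead of raising when the driver is missing:

import static org.bytedeco.pytorch.global.torch_cuda.*;

public class CudaInfoDemo {
    public static void main(String[] args) {
        // Safe even without a usable driver: returns false rather than throwing.
        if (!is_available()) {
            System.out.println("CUDA not available");
            return;
        }
        System.out.println("devices:   " + getNumGPUs()); // deprecated alias of device_count()
        System.out.println("warp size: " + warp_size());
    }
}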
+
+// #ifdef _WIN32
+// #else // _WIN32
+// #if defined(__GNUC__)
+// #define C10_CUDA_EXPORT __attribute__((__visibility__("default")))
+// #else // defined(__GNUC__)
+// #define C10_CUDA_EXPORT
+// #endif // defined(__GNUC__)
+// #define C10_CUDA_IMPORT C10_CUDA_EXPORT
+// #endif // _WIN32
+
+// This one is being used by libc10_cuda.so
+// #ifdef C10_CUDA_BUILD_MAIN_LIB
+// #define C10_CUDA_API C10_CUDA_EXPORT
+// #else
+// #define C10_CUDA_API C10_CUDA_IMPORT
+// #endif
+
+/**
+ * The maximum number of GPUs that we recognize.
+ */
+public static final int C10_COMPILE_TIME_MAX_GPUS = 16;
+
+
+// Parsed from c10/cuda/impl/cuda_cmake_macros.h
+
+// #pragma once
+
+// Automatically generated header file for the C10 CUDA library. Do not
+// include this file directly. Instead, include c10/cuda/CUDAMacros.h
+
+// #define C10_CUDA_BUILD_SHARED_LIBS
+
+
+// Parsed from c10/cuda/CUDAGraphsC10Utils.h
+
+// #pragma once
+
+// #include
+// #include
+
+// CUDA Graphs utils used by c10 and aten.
+// aten/cuda/CUDAGraphsUtils.cuh adds utils used by aten only.
+
+// first is set if the instance is created by CUDAGraph::capture_begin.
+// second is set if the instance is created by at::cuda::graph_pool_handle.
+// Targeting ../cuda/CUDAStreamCaptureModeGuard.java
+
+
+// #endif
+
+// #if !defined(USE_ROCM) || ROCM_VERSION >= 50300
+// Protects against enum cudaStreamCaptureStatus implementation changes.
+// Some compilers seem not to like static_assert without the messages.
+// #endif
+
+
+
+@Namespace("c10::cuda") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer os, @Cast("c10::cuda::CaptureStatus") int status);
+
+// Use this version where you're sure a CUDA context exists already.
+@Namespace("c10::cuda") public static native @Cast("c10::cuda::CaptureStatus") int currentStreamCaptureStatusMayInitCtx();
+
+  // namespace cuda
+  // namespace c10
+
+
+// Parsed from ATen/cuda/Exceptions.h
+
+// #pragma once
+
+// #include
+// #include
+// #include
+
+// #ifdef CUDART_VERSION
+// #include
+// #endif
+
+// #include
+// #include
+// #include
+// Targeting ../cuda/CuDNNError.java
+
+
+
+  // namespace c10
+
+// #define AT_CUDNN_CHECK_WITH_SHAPES(EXPR, ...) AT_CUDNN_CHECK(EXPR, "\n", ##__VA_ARGS__)
+
+// See Note [CHECK macro]
+// #define AT_CUDNN_CHECK(EXPR, ...)
+// do {
+// cudnnStatus_t status = EXPR;
+// if (status != CUDNN_STATUS_SUCCESS) {
+// if (status == CUDNN_STATUS_NOT_SUPPORTED) {
+// TORCH_CHECK_WITH(CuDNNError, false,
+// "cuDNN error: ",
+// cudnnGetErrorString(status),
+// ". 
This error may appear if you passed in a non-contiguous input.", ##__VA_ARGS__); +// } else { +// TORCH_CHECK_WITH(CuDNNError, false, +// "cuDNN error: ", cudnnGetErrorString(status), ##__VA_ARGS__); +// } +// } +// } while (0) +@Namespace("at::cuda::blas") public static native @Cast("const char*") BytePointer _cublasGetErrorEnum(@Cast("cublasStatus_t") int error); + // namespace at::cuda::blas + +// #define TORCH_CUDABLAS_CHECK(EXPR) +// do { +// cublasStatus_t __err = EXPR; +// TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS, +// "CUDA error: ", +// at::cuda::blas::_cublasGetErrorEnum(__err), +// " when calling `" #EXPR "`"); +// } while (0) + +public static native @Cast("const char*") BytePointer cusparseGetErrorString(@Cast("cusparseStatus_t") int status); + +// #define TORCH_CUDASPARSE_CHECK(EXPR) +// do { +// cusparseStatus_t __err = EXPR; +// TORCH_CHECK(__err == CUSPARSE_STATUS_SUCCESS, +// "CUDA error: ", +// cusparseGetErrorString(__err), +// " when calling `" #EXPR "`"); +// } while (0) + +// cusolver related headers are only supported on cuda now +// #ifdef CUDART_VERSION +@Namespace("at::cuda::solver") public static native @Cast("const char*") BytePointer cusolverGetErrorMessage(@Cast("cusolverStatus_t") int status); + // namespace at::cuda::solver + +// When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan. +// When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue. +// #define TORCH_CUSOLVER_CHECK(EXPR) +// do { +// cusolverStatus_t __err = EXPR; +// if ((CUDA_VERSION < 11500 && +// __err == CUSOLVER_STATUS_EXECUTION_FAILED) || +// (CUDA_VERSION >= 11500 && +// __err == CUSOLVER_STATUS_INVALID_VALUE)) { +// TORCH_CHECK_LINALG( +// false, +// "cusolver error: ", +// at::cuda::solver::cusolverGetErrorMessage(__err), +// ", when calling `" #EXPR "`", +// ". This error may appear if the input matrix contains NaN."); +// } else { +// TORCH_CHECK( +// __err == CUSOLVER_STATUS_SUCCESS, +// "cusolver error: ", +// at::cuda::solver::cusolverGetErrorMessage(__err), +// ", when calling `" #EXPR "`"); +// } +// } while (0) + +// #else +// #define TORCH_CUSOLVER_CHECK(EXPR) EXPR +// #endif + +// #define AT_CUDA_CHECK(EXPR) C10_CUDA_CHECK(EXPR) + +// For CUDA Driver API +// +// This is here instead of in c10 because NVRTC is loaded dynamically via a stub +// in ATen, and we need to use its nvrtcGetErrorString. +// See NOTE [ USE OF NVRTC AND DRIVER API ]. +// #if !defined(USE_ROCM) + +// #define AT_CUDA_DRIVER_CHECK(EXPR) +// do { +// CUresult __err = EXPR; +// if (__err != CUDA_SUCCESS) { +// const char* err_str; +// CUresult get_error_str_err C10_UNUSED = at::globalContext().getNVRTC().cGetErrorString(__err, &err_str); +// if (get_error_str_err != CUDA_SUCCESS) { +// AT_ERROR("CUDA driver error: unknown error"); +// } else { +// AT_ERROR("CUDA driver error: ", err_str); +// } +// } +// } while (0) + +// #else + +// #define AT_CUDA_DRIVER_CHECK(EXPR) +// do { +// CUresult __err = EXPR; +// if (__err != CUDA_SUCCESS) { +// AT_ERROR("CUDA driver error: ", static_cast(__err)); +// } +// } while (0) + +// #endif + +// For CUDA NVRTC +// +// Note: As of CUDA 10, nvrtc error code 7, NVRTC_ERROR_BUILTIN_OPERATION_FAILURE, +// incorrectly produces the error string "NVRTC unknown error." +// The following maps it correctly. +// +// This is here instead of in c10 because NVRTC is loaded dynamically via a stub +// in ATen, and we need to use its nvrtcGetErrorString. +// See NOTE [ USE OF NVRTC AND DRIVER API ]. 
+// #define AT_CUDA_NVRTC_CHECK(EXPR) +// do { +// nvrtcResult __err = EXPR; +// if (__err != NVRTC_SUCCESS) { +// if (static_cast<int>(__err) != 7) { +// AT_ERROR("CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); +// } else { +// AT_ERROR("CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); +// } +// } +// } while (0) + + +// Parsed from ATen/cudnn/cudnn-wrapper.h + +// #pragma once + +// #include + +// #define STRINGIFY(x) #x +// #define STRING(x) STRINGIFY(x) + +// #if CUDNN_MAJOR < 6 +// #pragma message ("CuDNN v" STRING(CUDNN_MAJOR) " found, but need at least CuDNN v6. You can get the latest version of CuDNN from https://developer.nvidia.com/cudnn or disable CuDNN with USE_CUDNN=0") +// #pragma message "We strongly encourage you to move to 6.0 and above." +// #pragma message "This message is intended to annoy you enough to update." +// #endif + +// #undef STRINGIFY +// #undef STRING + + +// Parsed from ATen/cudnn/Utils.h + +// #pragma once + +// #include +// #include +// #include +// #include + +// cuDNN has a buggy check for a tensor being contiguous (that is, it does +// not ignore strides that are equal to 0). This function makes tensors which +// have zero strides contiguous, by setting those strides to 1 as cuDNN likes. +@Namespace("at::native") public static native @ByVal Tensor contiguousIfZeroInStrides(@Const @ByRef Tensor t); + + + + +// Parsed from ATen/cudnn/Handle.h + +// #pragma once + +// #include +// #include + +@Namespace("at::native") public static native @Cast("cudnnHandle_t") Pointer getCudnnHandle(); + // namespace at::native + + +// Parsed from ATen/cuda/ATenCUDAGeneral.h + +// #pragma once + +// #include +// #include +// #include + +// #include + +// Use TORCH_CUDA_CPP_API or TORCH_CUDA_CU_API for exports from this folder + + +// Parsed from ATen/cudnn/Descriptors.h + +// #pragma once + +// #include + +// #include +// #include + +// #include +// #include +// #include +// #include +// #include +// #include + +// #ifndef AT_PER_OPERATOR_HEADERS +// #include +// #else +// #include +// #endif + + + +// TODO: Add constructors for all of the descriptors + +@Namespace("at::native") public static native int dataSize(@Cast("cudnnDataType_t") int dataType); + +// The stride for a size-1 dimension is not uniquely determined; in +// fact, it can be anything you want, because the fact that the +// tensor has size 1 at this dimension means that you will never actually +// try advancing your pointer by this stride. +// +// However, cuDNN has a much more stringent requirement on strides: +// if you are passing a contiguous input, it had better be the case +// that the stride for dim i is the product of the sizes of dims +// i+1 to the end. This stride is indeed uniquely determined. This +// function modifies 'stride' in place so this invariant holds.
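To make the invariant concrete: for sizes s0..s(n-1), the contiguous stride of dim i must be s(i+1) * ... * s(n-1), with stride 1 for the last dim. A minimal self-contained Java sketch of this computation (contiguousStrides is our own illustrative helper, not part of the generated bindings):

import java.util.Arrays;

public class ContiguousStridesSketch {
    // Returns the uniquely determined contiguous strides for the given sizes:
    // stride[i] = sizes[i+1] * sizes[i+2] * ... * sizes[n-1], and 1 for the last dim.
    static long[] contiguousStrides(long[] sizes) {
        long[] strides = new long[sizes.length];
        long acc = 1;
        for (int i = sizes.length - 1; i >= 0; i--) {
            strides[i] = acc;
            acc *= sizes[i];
        }
        return strides;
    }

    public static void main(String[] args) {
        // Sizes {2, 1, 3} yield strides {3, 3, 1}: the size-1 dimension is forced
        // to stride 3 rather than an arbitrary value, which is exactly the fix-up
        // described in the comment above.
        System.out.println(Arrays.toString(contiguousStrides(new long[]{2, 1, 3})));
    }
}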
+// Targeting ../cuda/TensorDescriptor.java + + + + +// Targeting ../cuda/FilterDescriptor.java + + + + +// Targeting ../cuda/ConvolutionDescriptor.java + + +// Targeting ../cuda/SpatialTransformerDescriptor.java + + +// Targeting ../cuda/DropoutDescriptor.java + + +// Targeting ../cuda/RNNDescriptor.java + + +// Targeting ../cuda/CTCLossDescriptor.java + + +// Targeting ../cuda/ActivationDescriptor.java + + +// Targeting ../cuda/Constant.java + + + + // namespace + + +// Parsed from ATen/cudnn/Types.h + +// #pragma once + +// #include +// #include + +@Namespace("at::native") public static native @Cast("cudnnDataType_t") int getCudnnDataTypeFromScalarType(ScalarType dtype); + + + + + // namespace at::cudnn + + +// Parsed from c10/cuda/CUDAGuard.h + +// #pragma once + +// #include +// #include +// #include +// #include +// #include + +// #include +// Targeting ../cuda/CUDAGuard.java + + +// Targeting ../cuda/OptionalCUDAGuard.java + + +// Targeting ../cuda/CUDAStreamGuard.java + + +// Targeting ../cuda/OptionalCUDAStreamGuard.java + + +// Targeting ../cuda/CUDAMultiStreamGuard.java + + + + // namespace cuda + // namespace c10 + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list.java index f263cd06b54..8e8fef1a3c0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list_iterator.java index 7a76d40a58d..941ffdbf080 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/graph_node_list_iterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ivalue_to_const_ref_overload_return.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ivalue_to_const_ref_overload_return.java deleted file mode 100644 index bad67e12bb0..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ivalue_to_const_ref_overload_return.java +++ /dev/null @@ -1,40 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; 
-import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -// Determine the return type of `IValue::to() const &`. It's a const -// reference when possible and a copy otherwise. It is in this -// separate header so that List can use it as well. - -@Name("c10::detail::ivalue_to_const_ref_overload_return") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class ivalue_to_const_ref_overload_return extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public ivalue_to_const_ref_overload_return() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public ivalue_to_const_ref_overload_return(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ivalue_to_const_ref_overload_return(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public ivalue_to_const_ref_overload_return position(long position) { - return (ivalue_to_const_ref_overload_return)super.position(position); - } - @Override public ivalue_to_const_ref_overload_return getPointer(long i) { - return new ivalue_to_const_ref_overload_return((Pointer)this).offsetAddress(i); - } - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kArea.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kArea.java index 1c78e4e3d40..914677f530e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kArea.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kArea.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kBatchMean.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kBatchMean.java index 03d4ccd0d06..a717e0f20dc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kBatchMean.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kBatchMean.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kBicubic.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kBicubic.java index 2250ca1c17d..f3dede0a3db 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kBicubic.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kBicubic.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import 
org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kBilinear.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kBilinear.java index e02b4cc7d4b..69bfbb72eb1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kBilinear.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kBilinear.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kBorder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kBorder.java index 03e92bc6924..e36b257049c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kBorder.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kBorder.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kCircular.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kCircular.java index d09013ce245..ae8b8fe0db3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kCircular.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kCircular.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConstant.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConstant.java index f1b354056ed..68c3ea270a0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConstant.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConstant.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv1D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv1D.java index 14639f1f6cb..de95554ef3e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv1D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv1D.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// 
Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv2D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv2D.java index 712921d9a67..295c084ae3c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv2D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv2D.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv3D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv3D.java index b193cb24d2a..b3fe5e9e253 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConv3D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConv3D.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose1D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose1D.java index 5462970b63f..a19900f88cd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose1D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose1D.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose2D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose2D.java index 6817c7e7277..659afe50b0d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose2D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose2D.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose3D.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose3D.java index 62e4e1b259e..9105490ac8a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose3D.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kConvTranspose3D.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kFanIn.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kFanIn.java index 6a59f5a63bd..9c767129343 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kFanIn.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kFanIn.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kFanOut.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kFanOut.java index 40098057d0c..9345c8f9864 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kFanOut.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kFanOut.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kGELU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kGELU.java index 8c7f3518db7..06bbb74998a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kGELU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kGELU.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kGRU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kGRU.java index 8930c5ea996..e0fc60f2f5c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kGRU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kGRU.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import 
org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kLSTM.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kLSTM.java index b0ac579f507..ffae846e939 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kLSTM.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kLSTM.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kLeakyReLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kLeakyReLU.java index e040d5b6947..50b44accc36 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kLeakyReLU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kLeakyReLU.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kLinear.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kLinear.java index 7dd27f46025..f55d0e76546 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kLinear.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kLinear.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kMax.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kMax.java index ba9754cb0d2..2017bc995be 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kMax.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kMax.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kMean.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kMean.java index 4d85293c458..58a0a3bf496 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kMean.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kMean.java @@ 
-1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kMish.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kMish.java index 8b4ff28a5d6..75e7a010e43 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kMish.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kMish.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kNearest.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kNearest.java index 44fbf1d3a18..0e3739c94a7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kNearest.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kNearest.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kNearestExact.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kNearestExact.java index 348339fba8d..d6b6cbc9cac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kNearestExact.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kNearestExact.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kNone.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kNone.java index 86b1ebb6f30..b8b831bc1a4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kNone.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kNone.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git 
a/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_RELU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_RELU.java index ed7b4a697d8..456c075e592 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_RELU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_RELU.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_TANH.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_TANH.java index a7284f105c9..dda6dced87d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_TANH.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kRNN_TANH.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kReLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kReLU.java index cd00b904f73..d62df3ccd5f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kReLU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kReLU.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kReflect.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kReflect.java index b3286bcdd87..30a2761f443 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kReflect.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kReflect.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kReflection.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kReflection.java index 2100d2ce27e..bbc055fca38 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kReflection.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kReflection.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; 
import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kReplicate.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kReplicate.java index 9d656294f25..4618567f0f6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kReplicate.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kReplicate.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kSame.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kSame.java index ecade8e0d52..6a4116bb2f6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kSame.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kSame.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kSiLU.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kSiLU.java index 85ced51b183..5bfbdd052d3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kSiLU.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kSiLU.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kSigmoid.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kSigmoid.java index f42ddf2bfc9..f8b3be4c582 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kSigmoid.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kSigmoid.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kSum.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kSum.java index 5a81ac2031a..46a65007964 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kSum.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/kSum.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kTanh.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kTanh.java index 1db986205bb..6b4a9d72279 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kTanh.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kTanh.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kTrilinear.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kTrilinear.java index 9743c4187e0..65bf943fedd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kTrilinear.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kTrilinear.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kValid.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kValid.java index 51d4cdeeaee..49102c20cea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kValid.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kValid.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/kZeros.java b/pytorch/src/gen/java/org/bytedeco/pytorch/kZeros.java index 9ad6477649e..0cfe60a81c8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/kZeros.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/kZeros.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/module_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/module_iterator.java index a864955d3f3..1a9a4f11cc2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/module_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/module_iterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -53,4 +55,9 @@ private native void allocate( public native @ByVal @Name("operator ->") JitModule access(); public native @ByRef @Name("operator ++") module_iterator increment(); public native @ByVal @Name("operator ++") module_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef module_iterator a, + @Const @ByRef module_iterator b); + public boolean notEquals(module_iterator b) { return notEquals(this, b); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/module_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/module_list.java index 8ac0b799349..a9b5098fd19 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/module_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/module_list.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_data_pod.java b/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_data_pod.java new file mode 100644 index 00000000000..3094e26f856 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_data_pod.java @@ -0,0 +1,121 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +/** + * Note [Mt19937 Engine implementation] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Originally implemented in: + * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/MTARCOK/mt19937ar-cok.c + * and modified with C++ constructs. Moreover, the state array of the engine + * has been modified to hold 32 bit uints instead of 64 bits. + * + * Note that we reimplemented mt19937 instead of using std::mt19937 because + * at::mt19937 turns out to be faster in the pytorch codebase. PyTorch builds with -O2 + * by default and the following are the benchmark numbers (benchmark code can be found at + * https://github.com/syed-ahmed/benchmark-rngs): + * + * with -O2 + * Time to get 100000000 philox randoms with at::uniform_real_distribution = 0.462759s + * Time to get 100000000 at::mt19937 randoms with at::uniform_real_distribution = 0.39628s + * Time to get 100000000 std::mt19937 randoms with std::uniform_real_distribution = 0.352087s + * Time to get 100000000 std::mt19937 randoms with at::uniform_real_distribution = 0.419454s + * + * std::mt19937 is faster when used in conjunction with std::uniform_real_distribution, + * however we can't use std::uniform_real_distribution because of this bug: + * http://open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#2524. Plus, even if we used + * std::uniform_real_distribution and filtered out the 1's, it is a different algorithm + * than what's in pytorch currently and that messes up the tests in tests_distributions.py. + * The other option, using std::mt19937 with at::uniform_real_distribution, is a tad bit slower + * than at::mt19937 with at::uniform_real_distribution and hence we went with the latter. + * + * Copyright notice: + * A C-program for MT19937, with initialization improved 2002/2/10. + * Coded by Takuji Nishimura and Makoto Matsumoto. + * This is a faster version by taking Shawn Cokus's optimization, + * Matthe Bellew's simplification, Isaku Wada's real version. + * + * Before using, initialize the state by using init_genrand(seed) + * or init_by_array(init_key, key_length). + * + * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Any feedback is very welcome. + * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html + * email: m-mat \ math.sci.hiroshima-u.ac.jp (remove space) + */ + +/** + * mt19937_data_pod is used to get POD data in and out + * of mt19937_engine. Used in torch.get_rng_state and + * torch.set_rng_state functions.
+ */ +@Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class mt19937_data_pod extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public mt19937_data_pod() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public mt19937_data_pod(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public mt19937_data_pod(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public mt19937_data_pod position(long position) { + return (mt19937_data_pod)super.position(position); + } + @Override public mt19937_data_pod getPointer(long i) { + return new mt19937_data_pod((Pointer)this).offsetAddress(i); + } + + public native @Cast("uint64_t") long seed_(); public native mt19937_data_pod seed_(long setter); + public native int left_(); public native mt19937_data_pod left_(int setter); + public native @Cast("bool") boolean seeded_(); public native mt19937_data_pod seeded_(boolean setter); + public native @Cast("uint32_t") int next_(); public native mt19937_data_pod next_(int setter); + public native @ByRef @Cast("std::array*") IntPointer state_(); public native mt19937_data_pod state_(IntPointer setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_engine.java b/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_engine.java new file mode 100644 index 00000000000..6c480407dd7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/mt19937_engine.java @@ -0,0 +1,43 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("at") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class mt19937_engine extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public mt19937_engine(Pointer p) { super(p); } + + + public mt19937_engine(@Cast("uint64_t") long seed/*=5489*/) { super((Pointer)null); allocate(seed); } + private native void allocate(@Cast("uint64_t") long seed/*=5489*/); + public mt19937_engine() { super((Pointer)null); allocate(); } + private native void allocate(); + + public native @ByVal mt19937_data_pod data(); + + public native void set_data(@Const @ByRef mt19937_data_pod data); + + public native @Cast("uint64_t") long seed(); + + public native @Cast("bool") boolean is_valid(); + + public native @Cast("uint32_t") @Name("operator ()") int apply(); + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Object.java b/pytorch/src/gen/java/org/bytedeco/pytorch/mz_zip_archive.java similarity index 61% rename from pytorch/src/gen/java/org/bytedeco/pytorch/Object.java rename to pytorch/src/gen/java/org/bytedeco/pytorch/mz_zip_archive.java index 37b56d00e7f..c3641c41837 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Object.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/mz_zip_archive.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -15,10 +17,10 @@ import static org.bytedeco.pytorch.global.torch.*; -@Namespace("c10::ivalue") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class Object extends Pointer { +@Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class mz_zip_archive extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public Object() { super((Pointer)null); } + public mz_zip_archive() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Object(Pointer p) { super(p); } + public mz_zip_archive(Pointer p) { super(p); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java index 43e87eda05b..7724c8b9d99 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_iterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,4 +47,9 @@ private native void allocate( public native @ByVal @Name("operator ->") NamedIValue access(); public native @ByRef @Name("operator ++") named_attribute_iterator increment(); public native @ByVal @Name("operator ++") named_attribute_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef named_attribute_iterator a, + @Const @ByRef named_attribute_iterator b); + public boolean notEquals(named_attribute_iterator b) { return notEquals(this, b); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java index 4886f1237a9..0d1ee8c7015 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_attribute_list.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java index 523607b25f1..1aeef22c386 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_iterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,4 +47,9 @@ private native void allocate( public native @ByVal @Name("operator ->") NamedTensor access(); public native @ByRef @Name("operator ++") named_buffer_iterator increment(); public native @ByVal @Name("operator ++") named_buffer_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef named_buffer_iterator a, + @Const @ByRef named_buffer_iterator b); + public 
boolean notEquals(named_buffer_iterator b) { return notEquals(this, b); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java index 51b2cf09fa8..f79ee2c7687 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_buffer_list.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_iterator.java index 8435aad9b4e..6aa7e4cb3b4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_iterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,4 +47,9 @@ private native void allocate( public native @ByVal @Name("operator ->") NamedJitModule access(); public native @ByRef @Name("operator ++") named_module_iterator increment(); public native @ByVal @Name("operator ++") named_module_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef named_module_iterator a, + @Const @ByRef named_module_iterator b); + public boolean notEquals(named_module_iterator b) { return notEquals(this, b); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_list.java index c319255fd82..9bf4e85fae5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_module_list.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java index 2552a78f50f..b7b95d94a50 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_iterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; 
import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,4 +47,9 @@ private native void allocate( public native @ByVal @Name("operator ->") NamedTensor access(); public native @ByRef @Name("operator ++") named_parameter_iterator increment(); public native @ByVal @Name("operator ++") named_parameter_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + @Const @ByRef named_parameter_iterator a, + @Const @ByRef named_parameter_iterator b); + public boolean notEquals(named_parameter_iterator b) { return notEquals(this, b); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java index 8647cc70634..6eba09a6a7d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/named_parameter_list.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/pack.java b/pytorch/src/gen/java/org/bytedeco/pytorch/pack.java index 4b4bec9cc96..462a5983648 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/pack.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/pack.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java index 24a2903884d..14f412baf94 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_iterator.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -45,4 +47,9 @@ private native void allocate( public native @ByVal @Name("operator ->") Tensor access(); public native @ByRef @Name("operator ++") parameter_iterator increment(); public native @ByVal @Name("operator ++") parameter_iterator increment(int arg0); + + private static native @Namespace @Cast("bool") @Name("operator !=") boolean notEquals( + 
@Const @ByRef parameter_iterator a, + @Const @ByRef parameter_iterator b); + public boolean notEquals(parameter_iterator b) { return notEquals(this, b); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java index 7128b9c0e06..f650a73ab2b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/parameter_list.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/power_of_two_hash_policy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/power_of_two_hash_policy.java new file mode 100644 index 00000000000..4c3d397c33a --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/power_of_two_hash_policy.java @@ -0,0 +1,46 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("ska_ordered") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class power_of_two_hash_policy extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public power_of_two_hash_policy() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public power_of_two_hash_policy(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public power_of_two_hash_policy(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public power_of_two_hash_policy position(long position) { + return (power_of_two_hash_policy)super.position(position); + } + @Override public power_of_two_hash_policy getPointer(long i) { + return new power_of_two_hash_policy((Pointer)this).offsetAddress(i); + } + + public native @Cast("uint64_t") long index_for_hash(@Cast("uint64_t") long hash, @Cast("uint64_t") long num_slots_minus_one); + public native @Cast("uint64_t") long keep_in_range(@Cast("uint64_t") long index, @Cast("uint64_t") long num_slots_minus_one); + public native byte next_size_over(@Cast("uint64_t*") @ByRef LongPointer size); + public native byte next_size_over(@Cast("uint64_t*") @ByRef LongBuffer size); + public native byte next_size_over(@Cast("uint64_t*") @ByRef long[] size); + public native void commit(byte arg0); + public native void reset(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/pretty_tree.java b/pytorch/src/gen/java/org/bytedeco/pytorch/pretty_tree.java index 57dcd3ca43a..31fec4463a8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/pretty_tree.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/pretty_tree.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; @@ -20,14 +22,16 @@ @Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class pretty_tree extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public pretty_tree(Pointer p) { super(p); } - public pretty_tree(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree, @Cast("size_t") long col/*=40*/) { super((Pointer)null); allocate(tree, col); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree, @Cast("size_t") long col/*=40*/); - public pretty_tree(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree) { super((Pointer)null); allocate(tree); } - private native void allocate(@Cast("const torch::jit::TreeRef*") @ByRef Pointer tree); - @MemberGetter public native @Cast("const torch::jit::TreeRef*") @ByRef Pointer tree(); + public pretty_tree(@Const @ByRef TreeRef tree, @Cast("size_t") long col/*=40*/) { super((Pointer)null); allocate(tree, col); } + private native void allocate(@Const @ByRef TreeRef tree, @Cast("size_t") long col/*=40*/); + public pretty_tree(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); } + private native void allocate(@Const @ByRef TreeRef tree); + @MemberGetter public native @Const @ByRef TreeRef tree(); public native @Cast("size_t") long col(); public native pretty_tree col(long setter); - public native @ByRef @Cast("std::unordered_map*") Pointer flat_strings(); public native pretty_tree flat_strings(Pointer setter); - public native @StdString BytePointer get_flat(@Cast("const torch::jit::TreeRef*") @ByRef Pointer t); - public native void print(@Cast("std::ostream*") @ByRef Pointer out, @Cast("const torch::jit::TreeRef*") @ByRef Pointer t, int indent); + public native @ByRef TreeRefStringMap flat_strings(); public native pretty_tree flat_strings(TreeRefStringMap setter); + public native @StdString BytePointer get_flat(@Const @ByRef TreeRef t); + public native void print(@Cast("std::ostream*") @ByRef Pointer out, @Const @ByRef TreeRef t, int indent); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/prime_number_hash_policy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/prime_number_hash_policy.java new file mode 100644 index 00000000000..513f6993d37 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/prime_number_hash_policy.java @@ -0,0 +1,244 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + // namespace detailv3 + +@Namespace("ska_ordered") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class prime_number_hash_policy extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public prime_number_hash_policy() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public prime_number_hash_policy(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public prime_number_hash_policy(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public prime_number_hash_policy position(long position) { + return (prime_number_hash_policy)super.position(position); + } + @Override public prime_number_hash_policy getPointer(long i) { + return new prime_number_hash_policy((Pointer)this).offsetAddress(i); + } + + public static native @Cast("uint64_t") long mod0(@Cast("uint64_t") long arg0); + public static native @Cast("uint64_t") long mod2(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod3(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod5(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod7(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod11(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod13(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod17(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod23(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod29(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod37(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod47(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod59(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod73(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod97(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod127(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod151(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod197(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod251(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod313(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod397(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod499(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod631(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod797(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1009(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1259(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1597(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2011(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2539(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod3203(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod4027(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod5087(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod6421(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod8089(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod10193(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod12853(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod16193(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long 
mod20399(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod25717(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod32401(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod40823(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod51437(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod64811(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod81649(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod102877(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod129607(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod163307(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod205759(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod259229(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod326617(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod411527(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod518509(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod653267(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod823117(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1037059(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1306601(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1646237(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2074129(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2613229(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod3292489(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod4148279(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod5226491(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod6584983(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod8296553(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod10453007(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod13169977(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod16593127(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod20906033(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod26339969(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod33186281(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod41812097(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod52679969(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod66372617(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod83624237(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod105359939(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod132745199(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod167248483(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod210719881(@Cast("uint64_t") long hash); + public static native 
@Cast("uint64_t") long mod265490441(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod334496971(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod421439783(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod530980861(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod668993977(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod842879579(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1061961721(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1337987929(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1685759167(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2123923447(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2675975881(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod3371518343(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod4247846927(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod5351951779(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod6743036717(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod8495693897(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod10703903591(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod13486073473(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod16991387857(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod21407807219(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod26972146961(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod33982775741(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod42815614441(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod53944293929(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod67965551447(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod85631228929(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod107888587883(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod135931102921(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod171262457903(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod215777175787(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod271862205833(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod342524915839(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod431554351609(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod543724411781(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod685049831731(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod863108703229(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1087448823553(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1370099663459(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1726217406467(@Cast("uint64_t") long hash); + public static native 
@Cast("uint64_t") long mod2174897647073(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2740199326961(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod3452434812973(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod4349795294267(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod5480398654009(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod6904869625999(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod8699590588571(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod10960797308051(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod13809739252051(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod17399181177241(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod21921594616111(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod27619478504183(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod34798362354533(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod43843189232363(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod55238957008387(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod69596724709081(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod87686378464759(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod110477914016779(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod139193449418173(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod175372756929481(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod220955828033581(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod278386898836457(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod350745513859007(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod441911656067171(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod556773797672909(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod701491027718027(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod883823312134381(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1113547595345903(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1402982055436147(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1767646624268779(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2227095190691797(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2805964110872297(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod3535293248537579(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod4454190381383713(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod5611928221744609(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod7070586497075177(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod8908380762767489(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long 
mod11223856443489329(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod14141172994150357(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod17816761525534927(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod22447712886978529(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod28282345988300791(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod35633523051069991(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod44895425773957261(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod56564691976601587(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod71267046102139967(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod89790851547914507(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod113129383953203213(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod142534092204280003(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod179581703095829107(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod226258767906406483(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod285068184408560057(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod359163406191658253(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod452517535812813007(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod570136368817120201(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod718326812383316683(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod905035071625626043(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1140272737634240411(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1436653624766633509(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod1810070143251252131(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2280545475268481167(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod2873307249533267101(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod3620140286502504283(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod4561090950536962147(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod5746614499066534157(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod7240280573005008577(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod9122181901073924329(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod11493228998133068689(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod14480561146010017169(@Cast("uint64_t") long hash); + public static native @Cast("uint64_t") long mod18446744073709551557(@Cast("uint64_t") long hash); + + public static class mod_function extends FunctionPointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public mod_function(Pointer p) { super(p); } + protected mod_function() { allocate(); } + private native void allocate(); + public native @Cast("uint64_t") long call(@Cast("uint64_t") long arg0); + } + + public native mod_function next_size_over(@Cast("uint64_t*") @ByRef LongPointer size); + public native mod_function next_size_over(@Cast("uint64_t*") @ByRef LongBuffer size); + public native mod_function next_size_over(@Cast("uint64_t*") @ByRef long[] size); + public native void commit(mod_function new_mod_function); + public native void reset(); + + public native @Cast("uint64_t") long index_for_hash(@Cast("uint64_t") long hash, @Cast("uint64_t") long arg1); + public native @Cast("uint64_t") long keep_in_range(@Cast("uint64_t") long index, @Cast("uint64_t") long num_slots_minus_one); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/propagation_error.java b/pytorch/src/gen/java/org/bytedeco/pytorch/propagation_error.java deleted file mode 100644 index d6d1d78c419..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/propagation_error.java +++ /dev/null @@ -1,25 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -@Namespace("torch::jit") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class propagation_error extends Pointer { - /** Empty constructor. Calls {@code super((Pointer)null)}. */ - public propagation_error() { super((Pointer)null); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
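
Both hash-policy classes (power_of_two_hash_policy above and prime_number_hash_policy here) mirror the ska_ordered policies behind the order-preserving flat hash map that c10 uses internally. The power-of-two policy reduces a hash with a bitwise AND against num_slots_minus_one; the prime policy instead selects one of the modN functions above and hands it back as the mod_function pointer from next_size_over(). A minimal sketch of the power-of-two variant, with arbitrary constants:

    power_of_two_hash_policy policy = new power_of_two_hash_policy();
    long slotsMinusOne = 16 - 1;                 // a 16-slot table
    long hash = 0x9E3779B97F4A7C15L;             // any 64-bit hash value
    long slot = policy.index_for_hash(hash, slotsMinusOne);
    // For this policy the result is hash & slotsMinusOne, so 0 <= slot <= 15.
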
*/ - public propagation_error(Pointer p) { super(p); } -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/qint32.java b/pytorch/src/gen/java/org/bytedeco/pytorch/qint32.java index 39de62c543c..9e61295aad3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/qint32.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/qint32.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/qint8.java b/pytorch/src/gen/java/org/bytedeco/pytorch/qint8.java index 4571bdc176d..069d2cd0cb7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/qint8.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/qint8.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/quint2x4.java b/pytorch/src/gen/java/org/bytedeco/pytorch/quint2x4.java index 17cb1e3a572..c6a6e6cfe03 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/quint2x4.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/quint2x4.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/quint4x2.java b/pytorch/src/gen/java/org/bytedeco/pytorch/quint4x2.java index 7f53c7dc902..7a44c995099 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/quint4x2.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/quint4x2.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/quint8.java b/pytorch/src/gen/java/org/bytedeco/pytorch/quint8.java index ac8f0e998ca..7c6cfd7c823 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/quint8.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/quint8.java @@ -1,10 +1,12 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.pytorch; import 
org.bytedeco.pytorch.Allocator; import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; import java.nio.*; import org.bytedeco.javacpp.*; import org.bytedeco.javacpp.annotation.*; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/static_cast_with_inter_type.java b/pytorch/src/gen/java/org/bytedeco/pytorch/static_cast_with_inter_type.java deleted file mode 100644 index 0f961557361..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/static_cast_with_inter_type.java +++ /dev/null @@ -1,47 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - - -// Partial template instantiation for casting to uint8. -// Note: Converting from negative float values to unsigned integer types is -// undefined behavior in C++, and current CPU and GPU compilers exhibit -// divergent behavior. Casting from negative float values to signed -// integer types and then to unsigned integer types is not undefined, -// however, so this cast improves the consistency of type conversions -// to uint8 across compilers. -// Further note: Type conversions across compilers still have other undefined -// and divergent behavior. - -@Name("c10::static_cast_with_inter_type,c10::BFloat16>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class static_cast_with_inter_type extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public static_cast_with_inter_type() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public static_cast_with_inter_type(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public static_cast_with_inter_type(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public static_cast_with_inter_type position(long position) { - return (static_cast_with_inter_type)super.position(position); - } - @Override public static_cast_with_inter_type getPointer(long i) { - return new static_cast_with_inter_type((Pointer)this).offsetAddress(i); - } - -} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/transformer_activation_t.java b/pytorch/src/gen/java/org/bytedeco/pytorch/transformer_activation_t.java deleted file mode 100644 index 954e91144cf..00000000000 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/transformer_activation_t.java +++ /dev/null @@ -1,39 +0,0 @@ -// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE - -package org.bytedeco.pytorch; - -import org.bytedeco.pytorch.Allocator; -import org.bytedeco.pytorch.Function; -import org.bytedeco.pytorch.Module; -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import static org.bytedeco.openblas.global.openblas_nolapack.*; -import static org.bytedeco.openblas.global.openblas.*; - -import static org.bytedeco.pytorch.global.torch.*; - -@NoOffset @Name("c10::variant >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class transformer_activation_t extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public transformer_activation_t(Pointer p) { super(p); } - public transformer_activation_t(kReLU value) { this(); put(value); } - public transformer_activation_t(kGELU value) { this(); put(value); } - public transformer_activation_t() { allocate(); } - private native void allocate(); - public native @Name("operator =") @ByRef transformer_activation_t put(@ByRef transformer_activation_t x); - - public @ByRef kReLU get0() { return get0(this); } - @Namespace @Name("c10::get<0>") public static native @ByRef kReLU get0(@ByRef transformer_activation_t container); - @ValueSetter public native transformer_activation_t put(@ByRef kReLU value); - public @ByRef kGELU get1() { return get1(this); } - @Namespace @Name("c10::get<1>") public static native @ByRef kGELU get1(@ByRef transformer_activation_t container); - @ValueSetter public native transformer_activation_t put(@ByRef kGELU value); - public @Cast("std::function*") @ByRef Pointer get2() { return get2(this); } - @Namespace @Name("c10::get<2>") public static native @Cast("std::function*") @ByRef Pointer get2(@ByRef transformer_activation_t container); - @ValueSetter public native transformer_activation_t put(@Cast("std::function*") @ByRef Pointer value); -} - diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/type_index.java b/pytorch/src/gen/java/org/bytedeco/pytorch/type_index.java new file mode 100644 index 00000000000..7bbf4b4d2b7 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/type_index.java @@ -0,0 +1,38 @@ +// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static 
org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; + +import static org.bytedeco.pytorch.global.torch.*; + +// #endif + +@Namespace("c10::util") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class type_index extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public type_index(Pointer p) { super(p); } + + public type_index(@Cast("uint64_t") long checksum) { super((Pointer)null); allocate(checksum); } + private native void allocate(@Cast("uint64_t") long checksum); + + // Allow usage in std::map / std::set + // TODO Disallow this and rather use std::unordered_map/set everywhere + private static native @Namespace @Cast("const bool") @Name("operator <") @NoException(true) boolean lessThan(@ByVal type_index lhs, @ByVal type_index rhs); + public boolean lessThan(type_index rhs) { return lessThan(this, rhs); } + + private static native @Namespace @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer stream, @ByVal type_index typeId); + public Pointer shiftLeft(Pointer stream) { return shiftLeft(stream, this); } +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/AbstractTensor.java b/pytorch/src/main/java/org/bytedeco/pytorch/AbstractTensor.java index e7d7e704549..de2e812b421 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/AbstractTensor.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/AbstractTensor.java @@ -58,6 +58,7 @@ public static Tensor create(byte[] data, boolean signed, long... shape) { public static Tensor create(boolean[] data, long... shape) { Tensor t = empty(shape, new TensorOptions(ScalarType.Bool), null); BooleanIndexer i = t.createIndexer(); i.put(0, data); return t; } public abstract TensorOptions options(); + public abstract ScalarType scalar_type(); public abstract long ndimension(); public abstract long size(long dim); public abstract long stride(long dim); @@ -92,7 +93,7 @@ public B createBuffer(long index) { if (options.device().type().intern() != DeviceType.CPU) { throw new UnsupportedOperationException("Device type not supported: " + options.device().type().intern()); } - ScalarType dtype = options.dtype().toScalarType().intern(); + ScalarType dtype = scalar_type().intern(); Pointer ptr = data_ptr(); long size = nbytes(); switch (dtype) { @@ -129,7 +130,7 @@ public I createIndexer() { if (options.device().type().intern() != DeviceType.CPU) { throw new UnsupportedOperationException("Device type not supported: " + options.device().type().intern()); } - ScalarType dtype = options.dtype().toScalarType().intern(); + ScalarType dtype = scalar_type().intern(); Pointer ptr = data_ptr(); long size = nbytes(); int dims = (int)ndimension(); diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/TransformerActivation.java b/pytorch/src/main/java/org/bytedeco/pytorch/TransformerActivation.java new file mode 100644 index 00000000000..2dac4af2ceb --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/TransformerActivation.java @@ -0,0 +1,63 @@ +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.functions.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +/* This is a modified version of the variant container without the get2 method, that would + * return a std::function and not a function pointer. 
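
The AbstractTensor change a few hunks above now reads the element type from Tensor.scalar_type() rather than options().dtype().toScalarType(), which is why scalar_type() joins the abstract methods. Call sites are unaffected; a minimal sketch using the boolean factory defined in that same file (values arbitrary, BooleanIndexer comes from org.bytedeco.javacpp.indexer):

    Tensor t = AbstractTensor.create(new boolean[] {true, false, false, true}, 2, 2);
    BooleanIndexer idx = t.createIndexer();  // dtype now resolved via scalar_type()
    boolean v = idx.get(1, 1);               // true
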
*/ +@NoOffset @Name("c10::variant >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TransformerActivation extends Pointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public TransformerActivation(Pointer p) { + super(p); + } + + public TransformerActivation(kReLU value) { + this(); + put(value); + } + + public TransformerActivation(kGELU value) { + this(); + put(value); + } + + public TransformerActivation(TensorMapper value) { + this(); + put(value); + } + + public TransformerActivation() { + allocate(); + } + + private native void allocate(); + + public native @Name("operator =") @ByRef TransformerActivation put(@ByRef TransformerActivation x); + + public kReLU get0() { + return get0(this); + } + + @Namespace @Name("c10::get<0>") static native @ByRef kReLU get0(@ByRef TransformerActivation container); + + @ValueSetter public native TransformerActivation put(@ByRef kReLU value); + + public kGELU get1() { + return get1(this); + } + + @Namespace @Name("c10::get<1>") static native @ByRef kGELU get1(@ByRef TransformerActivation container); + + @ValueSetter public native TransformerActivation put(@ByRef kGELU value); + + @ValueSetter public native TransformerActivation put(@ByRef TensorMapper value); +} + diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/ArchiveWriter.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/ArchiveWriter.java new file mode 100644 index 00000000000..e2e7e1f2d06 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/ArchiveWriter.java @@ -0,0 +1,30 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Cast; +import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ArchiveWriter extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public ArchiveWriter(Pointer p) { + super(p); + } + + protected ArchiveWriter() { + allocate(); + } + + private native void allocate(); + + public native @Cast("size_t") long call(@Const Pointer buf, @Cast("size_t") long nbytes); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/DDPLogger.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/DDPLogger.java new file mode 100644 index 00000000000..d48bbaad51e --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/DDPLogger.java @@ -0,0 +1,31 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByRef; +import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.DDPLoggingData; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DDPLogger extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
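
TransformerActivation is the hand-written replacement for the generated transformer_activation_t deleted above: the same c10::variant mapping, but the std::function alternative is exposed through the TensorMapper function-pointer class instead of an opaque Pointer, and the corresponding get2() is dropped. A minimal round trip of one of the tag alternatives, assuming kGELU has the no-argument constructor the enumtype classes normally get:

    TransformerActivation activation = new TransformerActivation(new kGELU());
    kGELU gelu = activation.get1();          // alternative 1 stores the kGELU tag
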
+ */ + public DDPLogger(Pointer p) { + super(p); + } + + protected DDPLogger() { + allocate(); + } + + private native void allocate(); + + public native void call(@Const @ByRef DDPLoggingData d); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/DistanceFunction.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/DistanceFunction.java new file mode 100644 index 00000000000..8e914a19e6e --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/DistanceFunction.java @@ -0,0 +1,31 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByRef; +import org.bytedeco.javacpp.annotation.ByVal; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.Tensor; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class DistanceFunction extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public DistanceFunction(Pointer p) { + super(p); + } + + protected DistanceFunction() { + allocate(); + } + + private native void allocate(); + + public native @ByVal Tensor call(@ByRef Tensor t1, @ByRef Tensor t2); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/Func.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/Func.java new file mode 100644 index 00000000000..68783f47625 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/Func.java @@ -0,0 +1,28 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class Func extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public Func(Pointer p) { + super(p); + } + + protected Func() { + allocate(); + } + + private native void allocate(); + + public native void call(); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/GraphFunctionCreator.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/GraphFunctionCreator.java new file mode 100644 index 00000000000..093c5e42825 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/GraphFunctionCreator.java @@ -0,0 +1,30 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByRef; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.GraphFunction; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class GraphFunctionCreator extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
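
DistanceFunction matches the std::function<Tensor(const Tensor&, const Tensor&)> shape that options such as the triplet-margin-with-distance loss take. Like every class in the new functions package, it is meant to be subclassed from Java, and the instance must stay strongly referenced for as long as native code may invoke it. A sketch of an L1 distance, assuming the usual sub/abs/sum tensor methods are mapped:

    DistanceFunction l1 = new DistanceFunction() {
        @Override public Tensor call(Tensor a, Tensor b) {
            return a.sub(b).abs().sum();     // |a - b| summed to a scalar tensor
        }
    };
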
+ */ + public GraphFunctionCreator(Pointer p) { + super(p); + } + + protected GraphFunctionCreator() { + allocate(); + } + + private native void allocate(); + + public native void call(@ByRef GraphFunction f); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/IValueSupplier.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/IValueSupplier.java new file mode 100644 index 00000000000..4e41a712d10 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/IValueSupplier.java @@ -0,0 +1,30 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByPtr; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.IValue; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class IValueSupplier extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public IValueSupplier(Pointer p) { + super(p); + } + + protected IValueSupplier() { + allocate(); + } + + private native void allocate(); + + public native @ByPtr IValue call(); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/IValueVectorConsumer.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/IValueVectorConsumer.java new file mode 100644 index 00000000000..05855c1de81 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/IValueVectorConsumer.java @@ -0,0 +1,30 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByRef; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.IValueVector; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class IValueVectorConsumer extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public IValueVectorConsumer(Pointer p) { + super(p); + } + + protected IValueVectorConsumer() { + allocate(); + } + + private native void allocate(); + + public native void call(@ByRef IValueVector v); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/JitModuleApplyFunction.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/JitModuleApplyFunction.java new file mode 100644 index 00000000000..c7ed499ea45 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/JitModuleApplyFunction.java @@ -0,0 +1,30 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByRef; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.JitModule; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class JitModuleApplyFunction extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+ */ + public JitModuleApplyFunction(Pointer p) { + super(p); + } + + protected JitModuleApplyFunction() { + allocate(); + } + + private native void allocate(); + + public native void call(@ByRef JitModule m); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/LossClosure.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/LossClosure.java new file mode 100644 index 00000000000..32877c79326 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/LossClosure.java @@ -0,0 +1,29 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.*; +import org.bytedeco.pytorch.Tensor; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LossClosure extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public LossClosure(Pointer p) { + super(p); + } + + protected LossClosure() { + allocate(); + } + + private native void allocate(); + + public native @ByRef Tensor call(); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/ModuleApplyFunction.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/ModuleApplyFunction.java new file mode 100644 index 00000000000..c6dca07eb3c --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/ModuleApplyFunction.java @@ -0,0 +1,30 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.ByRef; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.pytorch.Module; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class ModuleApplyFunction extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public ModuleApplyFunction(Pointer p) { + super(p); + } + + protected ModuleApplyFunction() { + allocate(); + } + + private native void allocate(); + + public native void call(@ByRef Module m); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/NamedModuleApplyFunction.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/NamedModuleApplyFunction.java new file mode 100644 index 00000000000..ddee7cb7dc8 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/NamedModuleApplyFunction.java @@ -0,0 +1,29 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.*; +import org.bytedeco.pytorch.Module; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class NamedModuleApplyFunction extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
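
LossClosure and ModuleApplyFunction above follow the same pattern: subclass, override call(), and keep the object alive while the native side holds the pointer. Two sketches, assuming Module.apply(ModuleApplyFunction) and an optimizer step(LossClosure) overload are mapped the way upstream libtorch declares them; model, input and optimizer are placeholders:

    ModuleApplyFunction printer = new ModuleApplyFunction() {
        @Override public void call(Module m) { System.out.println(m); }
    };
    model.apply(printer);                    // visits the module and its submodules

    LossClosure closure = new LossClosure() {
        @Override public Tensor call() {
            optimizer.zero_grad();
            Tensor loss = model.forward(input).sum();
            loss.backward();
            return loss;                     // e.g. LBFGS re-evaluates through this
        }
    };
    optimizer.step(closure);
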
+ */ + public NamedModuleApplyFunction(Pointer p) { + super(p); + } + + protected NamedModuleApplyFunction() { + allocate(); + } + + private native void allocate(); + + public native void call(@Const @StdString @ByRef String name, @ByRef Module m); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/NamedSharedModuleApplyFunction.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/NamedSharedModuleApplyFunction.java new file mode 100644 index 00000000000..36730d521bc --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/NamedSharedModuleApplyFunction.java @@ -0,0 +1,29 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.*; +import org.bytedeco.pytorch.Module; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class NamedSharedModuleApplyFunction extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public NamedSharedModuleApplyFunction(Pointer p) { + super(p); + } + + protected NamedSharedModuleApplyFunction() { + allocate(); + } + + private native void allocate(); + + public native void call(@Const @StdString @ByRef String name, @ByRef @SharedPtr @Cast({"", "std::shared_ptr"}) Module m); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/PickleWriter.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PickleWriter.java new file mode 100644 index 00000000000..8d869911a3f --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PickleWriter.java @@ -0,0 +1,31 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.BytePointer; +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Cast; +import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PickleWriter extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public PickleWriter(Pointer p) { + super(p); + } + + protected PickleWriter() { + allocate(); + } + + private native void allocate(); + + public native void call(@Cast("const char *") BytePointer buf, @Cast("size_t") long nbytes); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/PointerConsumer.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PointerConsumer.java new file mode 100644 index 00000000000..6dccbb19ad2 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/PointerConsumer.java @@ -0,0 +1,28 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Properties; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class PointerConsumer extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+     */
+    public PointerConsumer(Pointer p) {
+        super(p);
+    }
+
+    protected PointerConsumer() {
+        allocate();
+    }
+
+    private native void allocate();
+
+    public native void call(Pointer p);
+}
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/Reader.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/Reader.java
new file mode 100644
index 00000000000..6017ad60e1e
--- /dev/null
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/Reader.java
@@ -0,0 +1,29 @@
+package org.bytedeco.pytorch.functions;
+
+import org.bytedeco.javacpp.FunctionPointer;
+import org.bytedeco.javacpp.Loader;
+import org.bytedeco.javacpp.Pointer;
+import org.bytedeco.javacpp.annotation.Cast;
+import org.bytedeco.javacpp.annotation.Properties;
+
+@Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class Reader extends FunctionPointer {
+    static {
+        Loader.load();
+    }
+
+    /**
+     * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
+     */
+    public Reader(Pointer p) {
+        super(p);
+    }
+
+    protected Reader() {
+        allocate();
+    }
+
+    private native void allocate();
+
+    public native @Cast("size_t") long call(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long nbytes);
+}
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/SharedModuleApplyFunction.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/SharedModuleApplyFunction.java
new file mode 100644
index 00000000000..2408d082311
--- /dev/null
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/SharedModuleApplyFunction.java
@@ -0,0 +1,32 @@
+package org.bytedeco.pytorch.functions;
+
+import org.bytedeco.javacpp.FunctionPointer;
+import org.bytedeco.javacpp.Loader;
+import org.bytedeco.javacpp.Pointer;
+import org.bytedeco.javacpp.annotation.ByRef;
+import org.bytedeco.javacpp.annotation.Cast;
+import org.bytedeco.javacpp.annotation.Properties;
+import org.bytedeco.javacpp.annotation.SharedPtr;
+import org.bytedeco.pytorch.Module;
+
+@Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class SharedModuleApplyFunction extends FunctionPointer {
+    static {
+        Loader.load();
+    }
+
+    /**
+     * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
+     */
+    public SharedModuleApplyFunction(Pointer p) {
+        super(p);
+    }
+
+    protected SharedModuleApplyFunction() {
+        allocate();
+    }
+
+    private native void allocate();
+
+    public native void call(@SharedPtr @ByRef @Cast({"", "std::shared_ptr<torch::nn::Module>"}) Module m);
+}
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/SizeTSupplier.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/SizeTSupplier.java
new file mode 100644
index 00000000000..7bd857fc056
--- /dev/null
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/SizeTSupplier.java
@@ -0,0 +1,29 @@
+package org.bytedeco.pytorch.functions;
+
+import org.bytedeco.javacpp.FunctionPointer;
+import org.bytedeco.javacpp.Loader;
+import org.bytedeco.javacpp.Pointer;
+import org.bytedeco.javacpp.annotation.Cast;
+import org.bytedeco.javacpp.annotation.Properties;
+
+@Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class SizeTSupplier extends FunctionPointer {
+    static {
+        Loader.load();
+    }
+
+    /**
+     * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
+ */ + public SizeTSupplier(Pointer p) { + super(p); + } + + protected SizeTSupplier() { + allocate(); + } + + private native void allocate(); + + public native @Cast("size_t") long call(); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/StringConsumer.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/StringConsumer.java new file mode 100644 index 00000000000..8672e03e86d --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/StringConsumer.java @@ -0,0 +1,30 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Cast; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.javacpp.annotation.StdString; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringConsumer extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public StringConsumer(Pointer p) { + super(p); + } + + protected StringConsumer() { + allocate(); + } + + private native void allocate(); + + public native void call(@Cast({"", "const std::string&"}) @StdString String s); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/StringSupplier.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/StringSupplier.java new file mode 100644 index 00000000000..6a2ae3fd6ec --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/StringSupplier.java @@ -0,0 +1,29 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.Properties; +import org.bytedeco.javacpp.annotation.StdString; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringSupplier extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. + */ + public StringSupplier(Pointer p) { + super(p); + } + + protected StringSupplier() { + allocate(); + } + + private native void allocate(); + + public native @StdString String call(); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorIdGetter.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorIdGetter.java new file mode 100644 index 00000000000..1951df6a037 --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorIdGetter.java @@ -0,0 +1,31 @@ +package org.bytedeco.pytorch.functions; + +import org.bytedeco.javacpp.BytePointer; +import org.bytedeco.javacpp.FunctionPointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.Pointer; +import org.bytedeco.javacpp.annotation.*; +import org.bytedeco.pytorch.Tensor; + +@Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class TensorIdGetter extends FunctionPointer { + static { + Loader.load(); + } + + /** + * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
+     */
+    public TensorIdGetter(Pointer p) {
+        super(p);
+    }
+
+    protected TensorIdGetter() {
+        allocate();
+    }
+
+    private native void allocate();
+
+    // std::function<std::string(const at::Tensor&)>
+    public native @StdString String call(@Const @ByRef Tensor tensor);
+}
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorMapper.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorMapper.java
new file mode 100644
index 00000000000..6573817b45a
--- /dev/null
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorMapper.java
@@ -0,0 +1,33 @@
+package org.bytedeco.pytorch.functions;
+
+import org.bytedeco.javacpp.FunctionPointer;
+import org.bytedeco.javacpp.Loader;
+import org.bytedeco.javacpp.Pointer;
+import org.bytedeco.javacpp.annotation.ByRef;
+import org.bytedeco.javacpp.annotation.ByVal;
+import org.bytedeco.javacpp.annotation.Const;
+import org.bytedeco.javacpp.annotation.Properties;
+import org.bytedeco.pytorch.Tensor;
+
+@Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TensorMapper extends FunctionPointer {
+    static {
+        Loader.load();
+    }
+
+    /**
+     * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
+     */
+    public TensorMapper(Pointer p) {
+        super(p);
+    }
+
+    protected TensorMapper() {
+        allocate();
+    }
+
+    private native void allocate();
+
+    // std::function<at::Tensor(const at::Tensor&)>
+    public native @ByVal Tensor call(@Const @ByRef Tensor t);
+}
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorTensorHook.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorTensorHook.java
new file mode 100644
index 00000000000..da05e5ea79c
--- /dev/null
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TensorTensorHook.java
@@ -0,0 +1,31 @@
+package org.bytedeco.pytorch.functions;
+
+import org.bytedeco.javacpp.FunctionPointer;
+import org.bytedeco.javacpp.Loader;
+import org.bytedeco.javacpp.Pointer;
+import org.bytedeco.javacpp.annotation.ByRef;
+import org.bytedeco.javacpp.annotation.ByVal;
+import org.bytedeco.javacpp.annotation.Properties;
+import org.bytedeco.pytorch.TensorBase;
+
+@Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TensorTensorHook extends FunctionPointer {
+    static {
+        Loader.load();
+    }
+
+    /**
+     * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
+     */
+    public TensorTensorHook(Pointer p) {
+        super(p);
+    }
+
+    protected TensorTensorHook() {
+        allocate();
+    }
+
+    private native void allocate();
+
+    public native @ByRef TensorBase call(@ByVal TensorBase a);
+}
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeMapper.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeMapper.java
new file mode 100644
index 00000000000..cf253f1f7c8
--- /dev/null
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeMapper.java
@@ -0,0 +1,29 @@
+package org.bytedeco.pytorch.functions;
+
+import org.bytedeco.javacpp.FunctionPointer;
+import org.bytedeco.javacpp.Loader;
+import org.bytedeco.javacpp.Pointer;
+import org.bytedeco.javacpp.annotation.*;
+import org.bytedeco.pytorch.Type;
+
+@Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TypeMapper extends FunctionPointer {
+    static {
+        Loader.load();
+    }
+
+    /**
+     * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
+     */
+    public TypeMapper(Pointer p) {
+        super(p);
+    }
+
+    protected TypeMapper() {
+        allocate();
+    }
+
+    private native void allocate();
+
+    public native @ByRef Type.TypePtr call(@ByVal Type.TypePtr t);
+}
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeRenamer.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeRenamer.java
new file mode 100644
index 00000000000..0094edb2793
--- /dev/null
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/TypeRenamer.java
@@ -0,0 +1,31 @@
+package org.bytedeco.pytorch.functions;
+
+import org.bytedeco.javacpp.FunctionPointer;
+import org.bytedeco.javacpp.Loader;
+import org.bytedeco.javacpp.Pointer;
+import org.bytedeco.javacpp.annotation.*;
+import org.bytedeco.pytorch.ClassType;
+import org.bytedeco.pytorch.QualifiedName;
+
+@Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class TypeRenamer extends FunctionPointer {
+    static {
+        Loader.load();
+    }
+
+    /**
+     * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
+     */
+    public TypeRenamer(Pointer p) {
+        super(p);
+    }
+
+    protected TypeRenamer() {
+        allocate();
+    }
+
+    private native void allocate();
+
+    // std::function<c10::QualifiedName(const c10::ClassTypePtr&)>
+    public native @ByVal QualifiedName call(@SharedPtr @Cast({"", "std::shared_ptr<c10::ClassType>"}) ClassType classType);
+}
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/ValueMapper.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/ValueMapper.java
new file mode 100644
index 00000000000..86c52f934a1
--- /dev/null
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/ValueMapper.java
@@ -0,0 +1,29 @@
+package org.bytedeco.pytorch.functions;
+
+import org.bytedeco.javacpp.FunctionPointer;
+import org.bytedeco.javacpp.Loader;
+import org.bytedeco.javacpp.Pointer;
+import org.bytedeco.javacpp.annotation.*;
+import org.bytedeco.pytorch.Value;
+
+@Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ValueMapper extends FunctionPointer {
+    static {
+        Loader.load();
+    }
+
+    /**
+     * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
+     */
+    public ValueMapper(Pointer p) {
+        super(p);
+    }
+
+    protected ValueMapper() {
+        allocate();
+    }
+
+    private native void allocate();
+
+    public native @ByPtr Value call(@ByPtr Value v);
+}
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/functions/VoidTensorHook.java b/pytorch/src/main/java/org/bytedeco/pytorch/functions/VoidTensorHook.java
new file mode 100644
index 00000000000..0f5b0ee53c2
--- /dev/null
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/functions/VoidTensorHook.java
@@ -0,0 +1,31 @@
+package org.bytedeco.pytorch.functions;
+
+import org.bytedeco.javacpp.FunctionPointer;
+import org.bytedeco.javacpp.Loader;
+import org.bytedeco.javacpp.Pointer;
+import org.bytedeco.javacpp.annotation.Properties;
+import org.bytedeco.javacpp.annotation.ByVal;
+import org.bytedeco.pytorch.TensorBase;
+
+
+@Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class VoidTensorHook extends FunctionPointer {
+    static {
+        Loader.load();
+    }
+
+    /**
+     * Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}.
+ */ + public VoidTensorHook(Pointer p) { + super(p); + } + + protected VoidTensorHook() { + allocate(); + } + + private native void allocate(); + + public native void call(@ByVal TensorBase a); +} diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 4f9fffef1a6..e2ff9467348 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2023 Samuel Audet, Eduardo Gonzalez + * Copyright (C) 2020-2023 Hervé Guillemet, Samuel Audet, Eduardo Gonzalez * * Licensed either under the Apache License, Version 2.0, or (at your option) * under the terms of the GNU General Public License as published by @@ -21,22 +21,27 @@ */ package org.bytedeco.pytorch.presets; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.ArrayList; import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + import org.bytedeco.javacpp.ClassProperties; import org.bytedeco.javacpp.LoadEnabled; import org.bytedeco.javacpp.Loader; import org.bytedeco.javacpp.Pointer; -import org.bytedeco.javacpp.FunctionPointer; + import org.bytedeco.javacpp.annotation.ByRef; -import org.bytedeco.javacpp.annotation.ByVal; import org.bytedeco.javacpp.annotation.Cast; -import org.bytedeco.javacpp.annotation.Const; import org.bytedeco.javacpp.annotation.MemberGetter; import org.bytedeco.javacpp.annotation.Namespace; import org.bytedeco.javacpp.annotation.Platform; import org.bytedeco.javacpp.annotation.Properties; -import org.bytedeco.javacpp.annotation.StdMove; -import org.bytedeco.javacpp.annotation.StdString; + import org.bytedeco.javacpp.tools.Info; import org.bytedeco.javacpp.tools.InfoMap; import org.bytedeco.javacpp.tools.InfoMapper; @@ -44,8 +49,7 @@ import org.bytedeco.openblas.presets.openblas; /** - * - * @author Samuel Audet + * @author Samuel Audet, Hervé Guillemet */ @Properties( inherit = openblas.class, @@ -55,1724 +59,26 @@ compiler = "cpp14", define = {"SHARED_PTR_NAMESPACE std", "UNIQUE_PTR_NAMESPACE std"}, include = { - "c10/macros/cmake_macros.h", - "c10/macros/Export.h", - "c10/macros/Macros.h", - "c10/util/IdWrapper.h", - "c10/util/MaybeOwned.h", -// "c10/util/C++17.h", -// "c10/util/Array.h", -// "c10/util/CallOnce.h", -// "c10/util/ConstexprCrc.h", -// "c10/util/ExclusivelyOwnedTensorTraits.h", -// "c10/core/PyHandleCache.h", -// "c10/util/TypeIndex.h", -// "c10/util/TypeTraits.h", -// "c10/util/TypeList.h", -// "c10/util/TypeSafeSignMath.h", -// "c10/util/Metaprogramming.h", -// "c10/util/Optional.h", -// "c10/util/ThreadLocal.h", -// "c10/util/UniqueVoidPtr.h", -// "c10/util/accumulate.h", -// "c10/util/either.h", -// "c10/util/env.h", -// "c10/util/hash.h", -// "c10/util/flat_hash_map.h", -// "c10/util/intrusive_ptr.h", -// "c10/util/irange.h", -// "c10/util/overloaded.h", -// "c10/util/python_stub.h", -// "c10/util/reverse_iterator.h", -// "c10/util/string_view.h", -// "c10/util/variant.h", - "c10/util/typeid.h", - "c10/util/AlignOf.h", - "c10/util/Deprecated.h", - "c10/util/StringUtil.h", - "c10/util/SmallVector.h", - "c10/util/DimVector.h", - "c10/util/Exception.h", - "c10/util/ArrayRef.h", - "c10/util/complex.h", - "c10/util/Half.h", - "c10/util/qint32.h", - "c10/util/qint8.h", - "c10/util/quint8.h", - "c10/util/BFloat16.h", - "c10/util/quint2x4.h", - "c10/util/quint4x2.h", - 
"c10/util/ThreadLocalDebugInfo.h", - "c10/util/Type.h", - "c10/util/TypeCast.h", - "c10/util/Registry.h", - "c10/util/Flags.h", - "c10/util/Logging.h", -// "c10/util/OptionalArrayRef.h", - "c10/core/DeviceType.h", - "c10/core/Device.h", - "c10/core/DeviceGuard.h", - "c10/core/DispatchKey.h", - "c10/core/DispatchKeySet.h", - "c10/core/Backend.h", - "c10/core/CopyBytes.h", - "c10/core/GradMode.h", - "c10/core/InferenceMode.h", - "c10/core/Layout.h", - "c10/core/MemoryFormat.h", - "c10/core/QEngine.h", - "c10/core/QScheme.h", - "c10/core/Stream.h", - "c10/core/ScalarType.h", - "c10/core/ScalarTypeToTypeMeta.h", - "c10/core/Scalar.h", - "c10/core/SymNodeImpl.h", - "c10/core/SymBool.h", -// "c10/core/SymFloatNodeImpl.h", - "c10/core/SymFloat.h", -// "c10/core/SymIntNodeImpl.h", - "c10/core/SymInt.h", - "c10/core/SymIntArrayRef.h", -// "c10/core/SymbolicIntNode.h", - "c10/core/Allocator.h", - "c10/core/DefaultDtype.h", - "c10/core/StorageImpl.h", - "c10/core/Storage.h", - "c10/core/TensorOptions.h", - "c10/core/TensorImpl.h", - "c10/core/UndefinedTensorImpl.h", - "c10/core/WrapDimMinimal.h", -// "c10/core/GeneratorImpl.h", -// "c10/core/impl/LocalDispatchKeySet.h", -// "c10/core/impl/DeviceGuardImplInterface.h", -// "c10/core/impl/PythonDispatcherTLS.h", -// "c10/core/impl/PyObjectSlot.h", -// "c10/core/impl/TorchDispatchModeTLS.h", -// "caffe2/serialize/read_adapter_interface.h", -// "caffe2/serialize/istream_adapter.h", -// "caffe2/serialize/versions.h", -// "caffe2/serialize/inline_container.h", -// "ATen/core/op_registration/hacky_wrapper_for_legacy_signatures.h" -// "ATen/core/ATen_fwd.h", -// "ATen/core/custom_class.h", - "ATen/core/symbol.h", - "ATen/core/aten_interned_strings.h", - "ATen/core/interned_strings.h", - "ATen/core/grad_mode.h", - "ATen/core/ATenGeneral.h", - "ATen/core/Dimname.h", - "ATen/core/DimVector.h", - "ATen/core/Generator.h", -// "ATen/core/CheckMemoryFormat.h", -// "ATen/core/DeprecatedTypeProperties.h", -// "ATen/core/DeprecatedTypePropertiesRegistry.h", -// "ATen/core/LegacyTypeDispatch.h", -// "ATen/core/QuantizerBase.h", -// "ATen/core/IListRef.h", - "ATen/core/Dict.h", - "ATen/core/List.h", - "ATen/core/NamedTensor.h", - "ATen/core/Reduction.h", - "ATen/core/Scalar.h", - "ATen/core/TensorAccessor.h", - "ATen/core/TensorBase.h", - "ATen/core/TensorBody.h", - "ATen/core/Tensor.h", - "ATen/core/Formatting.h", - "ATen/core/UnsafeFromTH.h", - "ATen/core/Variadic.h", - "ATen/core/blob.h", - "ATen/core/class_type.h", -// "ATen/core/dynamic_type.h", - "ATen/core/enum_tag.h", - "ATen/core/enum_type.h", - "ATen/core/type_ptr.h", - "ATen/core/functional.h", - "ATen/core/ivalue.h", - "ATen/core/ivalue_to.h", - "ATen/core/operator_name.h", - "ATen/core/qualified_name.h", - "ATen/core/stack.h", - "ATen/core/alias_info.h", - "ATen/core/jit_type_base.h", - "ATen/core/jit_type.h", - "ATen/core/function_schema.h", - "ATen/core/function.h", -// "ATen/core/builtin_function.h", -// "ATen/core/boxing/BoxedKernel.h", - "ATen/core/boxing/KernelFunction.h", -// "ATen/core/boxing/impl/boxing.h", - "ATen/core/dispatch/CppSignature.h", - "ATen/core/dispatch/DispatchKeyExtractor.h", - "ATen/core/dispatch/RegistrationHandleRAII.h", - "ATen/core/dispatch/OperatorOptions.h", - "ATen/core/dispatch/OperatorEntry.h", - "ATen/core/dispatch/Dispatcher.h", - "ATen/core/op_registration/op_allowlist.h", -// "ATen/core/op_registration/infer_schema.h", -// "ATen/core/op_registration/op_registration.h", -// "ATen/detail/CUDAHooksInterface.h", -// "ATen/detail/HIPHooksInterface.h", -// 
"ATen/detail/MPSHooksInterface.h", -// "ATen/detail/ORTHooksInterface.h", -// "ATen/CPUGeneratorImpl.h", -// "ATen/FuncTorchTLS.h", -// "ATen/MethodOperators.h", - "ATen/record_function.h", - "ATen/ThreadLocalState.h", - "ATen/ATen.h", - "ATen/Config.h", - "ATen/Device.h", - "ATen/DeviceGuard.h", - "ATen/DimVector.h", - "ATen/Dispatch.h", - "ATen/EmptyTensor.h", - "ATen/LinalgBackend.h", - "ATen/Formatting.h", - "ATen/Generator.h", - "ATen/PadNd.h", - "ATen/Parallel.h", - "ATen/Utils.h", - "ATen/TracerMode.h", - "ATen/WrapDimUtils.h", - "ATen/Tensor.h", - "ATen/TensorGeometry.h", - "ATen/TensorNames.h", - "ATen/TensorUtils.h", - "ATen/Context.h", - "ATen/ExpandUtils.h", - "ATen/Functions.h", - "ATen/NamedTensor.h", - "ATen/NestedTensorImpl.h", - "ATen/NamedTensorUtils.h", - "ATen/SavedTensorHooks.h", - "ATen/ScalarOps.h", - "ATen/SequenceNumber.h", - "ATen/TensorIndexing.h", - "ATen/TensorOperators.h", - "ATen/Version.h", - "ATen/WrapDimUtilsMulti.h", - - "ATen/ops/from_blob.h", - "ATen/ops/tensor.h", - "ATen/ops/_adaptive_avg_pool2d.h", - "ATen/ops/_adaptive_avg_pool2d_backward.h", - "ATen/ops/_adaptive_avg_pool3d.h", - "ATen/ops/_adaptive_avg_pool3d_backward.h", - "ATen/ops/_add_batch_dim.h", - "ATen/ops/_add_relu.h", - "ATen/ops/_addmm_activation.h", - "ATen/ops/_aminmax.h", - "ATen/ops/_amp_foreach_non_finite_check_and_unscale.h", - "ATen/ops/_amp_update_scale.h", - "ATen/ops/_assert_async.h", - "ATen/ops/_assert_tensor_metadata.h", - "ATen/ops/_autocast_to_full_precision.h", - "ATen/ops/_autocast_to_reduced_precision.h", - "ATen/ops/_backward.h", - "ATen/ops/_batch_norm_impl_index.h", - "ATen/ops/_batch_norm_impl_index_backward.h", - "ATen/ops/_cast_Byte.h", - "ATen/ops/_cast_Char.h", - "ATen/ops/_cast_Double.h", - "ATen/ops/_cast_Float.h", - "ATen/ops/_cast_Half.h", - "ATen/ops/_cast_Int.h", - "ATen/ops/_cast_Long.h", - "ATen/ops/_cast_Short.h", -// "ATen/ops/_cat.h", - "ATen/ops/_cdist_backward.h", - "ATen/ops/_cdist_forward.h", - "ATen/ops/_cholesky_solve_helper.h", - "ATen/ops/_choose_qparams_per_tensor.h", - "ATen/ops/_chunk_grad_outputs_efficient_attention.h", - "ATen/ops/_coalesce.h", - "ATen/ops/_coalesced.h", - "ATen/ops/_compute_linear_combination.h", - "ATen/ops/_conj.h", - "ATen/ops/_conj_copy.h", - "ATen/ops/_conj_physical.h", - "ATen/ops/_conv_depthwise2d.h", - "ATen/ops/_convert_indices_from_coo_to_csr.h", - "ATen/ops/_convert_indices_from_csr_to_coo.h", - "ATen/ops/_convolution.h", - "ATen/ops/_convolution_double_backward.h", - "ATen/ops/_convolution_mode.h", - "ATen/ops/_copy_from.h", - "ATen/ops/_copy_from_and_resize.h", - "ATen/ops/_ctc_loss.h", - "ATen/ops/_ctc_loss_backward.h", - "ATen/ops/_cudnn_ctc_loss.h", - "ATen/ops/_cudnn_init_dropout_state.h", - "ATen/ops/_cudnn_rnn.h", - "ATen/ops/_cudnn_rnn_backward.h", - "ATen/ops/_cudnn_rnn_flatten_weight.h", - "ATen/ops/_cufft_clear_plan_cache.h", - "ATen/ops/_cufft_get_plan_cache_max_size.h", - "ATen/ops/_cufft_get_plan_cache_size.h", - "ATen/ops/_cufft_set_plan_cache_max_size.h", - "ATen/ops/_cummax_helper.h", - "ATen/ops/_cummin_helper.h", - "ATen/ops/_debug_has_internal_overlap.h", -// "ATen/ops/_det_lu_based_helper.h", -// "ATen/ops/_det_lu_based_helper_backward_helper.h", - "ATen/ops/_dimI.h", - "ATen/ops/_dimV.h", - "ATen/ops/_dim_arange.h", - "ATen/ops/_dirichlet_grad.h", - "ATen/ops/_efficient_attention_backward.h", - "ATen/ops/_efficient_attention_forward.h", - "ATen/ops/_efficientzerotensor.h", - "ATen/ops/_embedding_bag.h", - "ATen/ops/_embedding_bag_backward.h", - 
"ATen/ops/_embedding_bag_dense_backward.h", - "ATen/ops/_embedding_bag_forward_only.h", - "ATen/ops/_embedding_bag_per_sample_weights_backward.h", - "ATen/ops/_embedding_bag_sparse_backward.h", - "ATen/ops/_empty_affine_quantized.h", - "ATen/ops/_empty_per_channel_affine_quantized.h", - "ATen/ops/_euclidean_dist.h", - "ATen/ops/_fake_quantize_learnable_per_channel_affine.h", - "ATen/ops/_fake_quantize_learnable_per_channel_affine_backward.h", - "ATen/ops/_fake_quantize_learnable_per_tensor_affine.h", - "ATen/ops/_fake_quantize_learnable_per_tensor_affine_backward.h", - "ATen/ops/_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.h", - "ATen/ops/_fft_c2c.h", - "ATen/ops/_fft_c2r.h", - "ATen/ops/_fft_r2c.h", -// "ATen/ops/_flash_scaled_dot_product_attention.h", - "ATen/ops/_flash_attention_backward.h", - "ATen/ops/_flash_attention_forward.h", - "ATen/ops/_foobar.h", - "ATen/ops/_foreach_abs.h", - "ATen/ops/_foreach_acos.h", - "ATen/ops/_foreach_add.h", - "ATen/ops/_foreach_addcdiv.h", - "ATen/ops/_foreach_addcmul.h", - "ATen/ops/_foreach_asin.h", - "ATen/ops/_foreach_atan.h", - "ATen/ops/_foreach_ceil.h", - "ATen/ops/_foreach_clamp_max.h", - "ATen/ops/_foreach_clamp_min.h", - "ATen/ops/_foreach_cos.h", - "ATen/ops/_foreach_cosh.h", - "ATen/ops/_foreach_div.h", - "ATen/ops/_foreach_erf.h", - "ATen/ops/_foreach_erfc.h", - "ATen/ops/_foreach_exp.h", - "ATen/ops/_foreach_expm1.h", - "ATen/ops/_foreach_floor.h", - "ATen/ops/_foreach_frac.h", - "ATen/ops/_foreach_lerp.h", - "ATen/ops/_foreach_lgamma.h", - "ATen/ops/_foreach_log.h", - "ATen/ops/_foreach_log10.h", - "ATen/ops/_foreach_log1p.h", - "ATen/ops/_foreach_log2.h", - "ATen/ops/_foreach_maximum.h", - "ATen/ops/_foreach_minimum.h", - "ATen/ops/_foreach_mul.h", - "ATen/ops/_foreach_neg.h", - "ATen/ops/_foreach_norm.h", - "ATen/ops/_foreach_reciprocal.h", - "ATen/ops/_foreach_round.h", - "ATen/ops/_foreach_sigmoid.h", - "ATen/ops/_foreach_sin.h", - "ATen/ops/_foreach_sinh.h", - "ATen/ops/_foreach_sqrt.h", - "ATen/ops/_foreach_sub.h", - "ATen/ops/_foreach_tan.h", - "ATen/ops/_foreach_tanh.h", - "ATen/ops/_foreach_trunc.h", - "ATen/ops/_foreach_zero.h", - "ATen/ops/_fused_adam.h", - "ATen/ops/_fused_adamw.h", - "ATen/ops/_fused_dropout.h", - "ATen/ops/_fused_moving_avg_obs_fq_helper.h", - "ATen/ops/_fused_sdp_choice.h", - "ATen/ops/_fw_primal.h", - "ATen/ops/_fw_primal_copy.h", - "ATen/ops/_gather_sparse_backward.h", - "ATen/ops/_grid_sampler_2d_cpu_fallback.h", - "ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h", - "ATen/ops/_has_compatible_shallow_copy_type.h", - "ATen/ops/_has_same_storage_numel.h", - "ATen/ops/_histogramdd_bin_edges.h", - "ATen/ops/_histogramdd_from_bin_cts.h", - "ATen/ops/_histogramdd_from_bin_tensors.h", -// "ATen/ops/_index_copy.h", - "ATen/ops/_index_put_impl.h", - "ATen/ops/_indices.h", - "ATen/ops/_indices_copy.h", - "ATen/ops/_is_all_true.h", - "ATen/ops/_is_any_true.h", - "ATen/ops/_is_zerotensor.h", - "ATen/ops/_linalg_check_errors.h", - "ATen/ops/_linalg_det.h", - "ATen/ops/_linalg_eigh.h", - "ATen/ops/_linalg_slogdet.h", - "ATen/ops/_linalg_solve_ex.h", -// "ATen/ops/_linalg_inv_out_helper.h", -// "ATen/ops/_linalg_qr_helper.h", - "ATen/ops/_linalg_svd.h", - "ATen/ops/_local_scalar_dense.h", - "ATen/ops/_log_softmax.h", - "ATen/ops/_log_softmax_backward_data.h", - "ATen/ops/_logcumsumexp.h", - "ATen/ops/_lstm_mps.h", - "ATen/ops/_lu_with_info.h", - "ATen/ops/_make_dual.h", - "ATen/ops/_make_dual_copy.h", - "ATen/ops/_make_per_channel_quantized_tensor.h", - "ATen/ops/_make_per_tensor_quantized_tensor.h", - 
"ATen/ops/_masked_scale.h", - "ATen/ops/_masked_softmax.h", - "ATen/ops/_masked_softmax_backward.h", - "ATen/ops/_mkldnn_reshape.h", - "ATen/ops/_mkldnn_transpose.h", - "ATen/ops/_mps_convolution.h", - "ATen/ops/_mps_convolution_transpose.h", -// "ATen/ops/_mps_linear.h", -// "ATen/ops/_mps_linear_backward_input.h", -// "ATen/ops/_mps_linear_backward_weights.h", -// "ATen/ops/_mps_max_pool2d.h", - "ATen/ops/_native_batch_norm_legit.h", - "ATen/ops/_native_decoder_only_multi_head_attention.h", - "ATen/ops/_native_multi_head_attention.h", -// "ATen/ops/_native_multi_head_self_attention.h", - "ATen/ops/_neg_view.h", - "ATen/ops/_neg_view_copy.h", - "ATen/ops/_nested_from_padded.h", - "ATen/ops/_nested_from_padded_and_nested_example.h", - "ATen/ops/_nested_select_backward.h", - "ATen/ops/_nested_sum_backward.h", - "ATen/ops/_nested_tensor_from_mask.h", - "ATen/ops/_nested_tensor_from_mask_left_aligned.h", - "ATen/ops/_nested_tensor_from_tensor_list.h", -// "ATen/ops/_nested_tensor_layer_norm.h", - "ATen/ops/_nested_tensor_offsets.h", - "ATen/ops/_nested_tensor_size.h", - "ATen/ops/_nested_tensor_softmax_with_shape.h", - "ATen/ops/_nested_tensor_strides.h", - "ATen/ops/_nested_view_from_buffer.h", - "ATen/ops/_nested_view_from_buffer_copy.h", - "ATen/ops/_new_zeros_with_same_feature_meta.h", -// "ATen/ops/_nnpack_available.h", - "ATen/ops/_nnpack_spatial_convolution.h", - "ATen/ops/_nnz.h", - "ATen/ops/_pack_padded_sequence.h", - "ATen/ops/_pack_padded_sequence_backward.h", - "ATen/ops/_pad_circular.h", - "ATen/ops/_pad_enum.h", - "ATen/ops/_pad_packed_sequence.h", - "ATen/ops/_pdist_backward.h", - "ATen/ops/_pdist_forward.h", - "ATen/ops/_pin_memory.h", - "ATen/ops/_prelu_kernel.h", - "ATen/ops/_prelu_kernel_backward.h", - "ATen/ops/_remove_batch_dim.h", - "ATen/ops/_reshape_alias.h", - "ATen/ops/_reshape_alias_copy.h", - "ATen/ops/_reshape_copy.h", - "ATen/ops/_reshape_from_tensor.h", - "ATen/ops/_resize_output.h", - "ATen/ops/_rowwise_prune.h", -// "ATen/ops/_s_where.h", - "ATen/ops/_sample_dirichlet.h", - "ATen/ops/_saturate_weight_to_fp16.h", - "ATen/ops/_scaled_dot_product_attention.h", -// "ATen/ops/_scaled_dot_product_attention_forward.h", - "ATen/ops/_scaled_dot_product_attention_math.h", - "ATen/ops/_scaled_dot_product_efficient_attention.h", - "ATen/ops/_scaled_dot_product_efficient_attention_backward.h", - "ATen/ops/_scaled_dot_product_flash_attention_backward.h", - "ATen/ops/_scaled_dot_product_attention_math.h", - "ATen/ops/_segment_reduce_backward.h", - "ATen/ops/_shape_as_tensor.h", - "ATen/ops/_slow_conv2d_backward.h", - "ATen/ops/_slow_conv2d_forward.h", - "ATen/ops/_sobol_engine_draw.h", - "ATen/ops/_sobol_engine_ff.h", - "ATen/ops/_sobol_engine_initialize_state.h", - "ATen/ops/_sobol_engine_scramble.h", - "ATen/ops/_softmax.h", - "ATen/ops/_softmax_backward_data.h", -// "ATen/ops/_solve_helper.h", - "ATen/ops/_sparse_addmm.h", - "ATen/ops/_sparse_broadcast_to.h", - "ATen/ops/_sparse_broadcast_to_copy.h", - "ATen/ops/_sparse_bsc_tensor_unsafe.h", - "ATen/ops/_sparse_bsr_tensor_unsafe.h", - "ATen/ops/_sparse_compressed_tensor_unsafe.h", - "ATen/ops/_sparse_coo_tensor_unsafe.h", - "ATen/ops/_sparse_coo_tensor_with_dims.h", - "ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h", - "ATen/ops/_sparse_csc_tensor_unsafe.h", - "ATen/ops/_sparse_csr_prod.h", - "ATen/ops/_sparse_csr_sum.h", - "ATen/ops/_sparse_csr_tensor_unsafe.h", - "ATen/ops/_sparse_log_softmax.h", - "ATen/ops/_sparse_log_softmax_backward_data.h", -// "ATen/ops/_sparse_mask_helper.h", - "ATen/ops/_sparse_mm.h", - 
"ATen/ops/_sparse_mm_reduce_impl.h", - "ATen/ops/_sparse_mm_reduce_impl_backward.h", - "ATen/ops/_sparse_softmax.h", - "ATen/ops/_sparse_softmax_backward_data.h", - "ATen/ops/_sparse_sparse_matmul.h", - "ATen/ops/_sparse_sum.h", - "ATen/ops/_sparse_sum_backward.h", - "ATen/ops/_spdiags.h", - "ATen/ops/_stack.h", - "ATen/ops/_standard_gamma.h", - "ATen/ops/_standard_gamma_grad.h", -// "ATen/ops/_symeig_helper.h", - "ATen/ops/_test_ambiguous_defaults.h", - "ATen/ops/_test_autograd_multiple_dispatch.h", - "ATen/ops/_test_autograd_multiple_dispatch_view.h", - "ATen/ops/_test_autograd_multiple_dispatch_view_copy.h", - "ATen/ops/_test_check_tensor.h", - "ATen/ops/_test_optional_filled_intlist.h", - "ATen/ops/_test_optional_floatlist.h", - "ATen/ops/_test_optional_intlist.h", - "ATen/ops/_test_serialization_subcmul.h", - "ATen/ops/_test_string_default.h", - "ATen/ops/_test_warn_in_autograd.h", - "ATen/ops/_thnn_differentiable_gru_cell_backward.h", - "ATen/ops/_thnn_differentiable_lstm_cell_backward.h", - "ATen/ops/_thnn_fused_gru_cell.h", - "ATen/ops/_thnn_fused_gru_cell_backward.h", - "ATen/ops/_thnn_fused_lstm_cell.h", - "ATen/ops/_thnn_fused_lstm_cell_backward.h", - "ATen/ops/_thnn_fused_lstm_cell_backward_impl.h", - "ATen/ops/_to_copy.h", - "ATen/ops/_to_cpu.h", - "ATen/ops/_to_dense.h", -// "ATen/ops/_torch_cuda_cu_linker_symbol_op.h", - "ATen/ops/_transform_bias_rescale_qkv.h", - "ATen/ops/_transformer_decoder_only_layer_fwd.h", - "ATen/ops/_transformer_encoder_layer_fwd.h", - "ATen/ops/_trilinear.h", - "ATen/ops/_triton_multi_head_attention.h", - "ATen/ops/_triton_scaled_dot_attention.h", - "ATen/ops/_unique.h", - "ATen/ops/_unique2.h", - "ATen/ops/_unpack_dual.h", - "ATen/ops/_unsafe_view.h", - "ATen/ops/_upsample_bicubic2d_aa.h", - "ATen/ops/_upsample_bicubic2d_aa_backward.h", - "ATen/ops/_upsample_bilinear2d_aa.h", - "ATen/ops/_upsample_bilinear2d_aa_backward.h", - "ATen/ops/_upsample_nearest_exact1d.h", - "ATen/ops/_upsample_nearest_exact1d_backward.h", - "ATen/ops/_upsample_nearest_exact2d.h", - "ATen/ops/_upsample_nearest_exact2d_backward.h", - "ATen/ops/_upsample_nearest_exact3d.h", - "ATen/ops/_upsample_nearest_exact3d_backward.h", - "ATen/ops/_use_cudnn_ctc_loss.h", -// "ATen/ops/_use_cudnn_rnn_flatten_weight.h", - "ATen/ops/_validate_compressed_sparse_indices.h", - "ATen/ops/_validate_sparse_bsc_tensor_args.h", - "ATen/ops/_validate_sparse_bsr_tensor_args.h", - "ATen/ops/_validate_sparse_compressed_tensor_args.h", - "ATen/ops/_validate_sparse_coo_tensor_args.h", - "ATen/ops/_validate_sparse_csc_tensor_args.h", - "ATen/ops/_validate_sparse_csr_tensor_args.h", - "ATen/ops/_values.h", - "ATen/ops/_values_copy.h", - "ATen/ops/_version.h", - "ATen/ops/_weight_norm.h", -// "ATen/ops/_weight_norm_cuda_interface.h", -// "ATen/ops/_weight_norm_cuda_interface_backward.h", - "ATen/ops/_weight_norm_differentiable_backward.h", - "ATen/ops/_weight_norm_interface.h", - "ATen/ops/_weight_norm_interface_backward.h", - "ATen/ops/abs.h", - "ATen/ops/absolute.h", - "ATen/ops/acos.h", - "ATen/ops/acosh.h", - "ATen/ops/adaptive_avg_pool1d.h", - "ATen/ops/adaptive_avg_pool2d.h", - "ATen/ops/adaptive_avg_pool3d.h", - "ATen/ops/adaptive_avg_pool3d_backward.h", - "ATen/ops/adaptive_max_pool1d.h", - "ATen/ops/adaptive_max_pool2d.h", - "ATen/ops/adaptive_max_pool2d_backward.h", - "ATen/ops/adaptive_max_pool3d.h", - "ATen/ops/adaptive_max_pool3d_backward.h", - "ATen/ops/add.h", - "ATen/ops/addbmm.h", - "ATen/ops/addcdiv.h", - "ATen/ops/addcmul.h", - "ATen/ops/addmm.h", - "ATen/ops/addmv.h", - 
"ATen/ops/addr.h", - "ATen/ops/adjoint.h", - "ATen/ops/affine_grid_generator.h", - "ATen/ops/affine_grid_generator_backward.h", - "ATen/ops/alias.h", - "ATen/ops/alias_copy.h", - "ATen/ops/align_as.h", - "ATen/ops/align_tensors.h", - "ATen/ops/align_to.h", - "ATen/ops/all.h", - "ATen/ops/allclose.h", - "ATen/ops/alpha_dropout.h", - "ATen/ops/amax.h", - "ATen/ops/amin.h", - "ATen/ops/aminmax.h", - "ATen/ops/and.h", - "ATen/ops/angle.h", - "ATen/ops/any.h", - "ATen/ops/arange.h", - "ATen/ops/arccos.h", - "ATen/ops/arccosh.h", - "ATen/ops/arcsin.h", - "ATen/ops/arcsinh.h", - "ATen/ops/arctan.h", - "ATen/ops/arctan2.h", - "ATen/ops/arctanh.h", - "ATen/ops/argmax.h", - "ATen/ops/argmin.h", - "ATen/ops/argsort.h", - "ATen/ops/argwhere.h", - "ATen/ops/as_strided.h", - "ATen/ops/as_strided_copy.h", - "ATen/ops/as_strided_scatter.h", - "ATen/ops/asin.h", - "ATen/ops/asinh.h", - "ATen/ops/atan.h", - "ATen/ops/atan2.h", - "ATen/ops/atanh.h", - "ATen/ops/atleast_1d.h", - "ATen/ops/atleast_2d.h", - "ATen/ops/atleast_3d.h", - "ATen/ops/avg_pool1d.h", - "ATen/ops/avg_pool2d.h", - "ATen/ops/avg_pool2d_backward.h", - "ATen/ops/avg_pool3d.h", - "ATen/ops/avg_pool3d_backward.h", - "ATen/ops/baddbmm.h", - "ATen/ops/bartlett_window.h", - "ATen/ops/batch_norm.h", - "ATen/ops/batch_norm_backward_elemt.h", - "ATen/ops/batch_norm_backward_reduce.h", - "ATen/ops/batch_norm_elemt.h", - "ATen/ops/batch_norm_gather_stats.h", - "ATen/ops/batch_norm_gather_stats_with_counts.h", - "ATen/ops/batch_norm_stats.h", - "ATen/ops/batch_norm_update_stats.h", - "ATen/ops/bernoulli.h", - "ATen/ops/bilinear.h", - "ATen/ops/binary_cross_entropy.h", - "ATen/ops/binary_cross_entropy_backward.h", - "ATen/ops/binary_cross_entropy_with_logits.h", -// "ATen/ops/binary_cross_entropy_with_logits_backward.h", - "ATen/ops/bincount.h", - "ATen/ops/binomial.h", - "ATen/ops/bitwise_and.h", - "ATen/ops/bitwise_left_shift.h", - "ATen/ops/bitwise_not.h", - "ATen/ops/bitwise_or.h", - "ATen/ops/bitwise_right_shift.h", - "ATen/ops/bitwise_xor.h", - "ATen/ops/blackman_window.h", - "ATen/ops/block_diag.h", - "ATen/ops/bmm.h", - "ATen/ops/broadcast_tensors.h", - "ATen/ops/broadcast_to.h", - "ATen/ops/bucketize.h", - "ATen/ops/can_cast.h", - "ATen/ops/cartesian_prod.h", - "ATen/ops/cat.h", - "ATen/ops/cauchy.h", - "ATen/ops/ccol_indices.h", - "ATen/ops/ccol_indices_copy.h", - "ATen/ops/cdist.h", - "ATen/ops/ceil.h", - "ATen/ops/celu.h", - "ATen/ops/chain_matmul.h", - "ATen/ops/chalf.h", - "ATen/ops/channel_shuffle.h", - "ATen/ops/cholesky.h", - "ATen/ops/cholesky_inverse.h", - "ATen/ops/cholesky_solve.h", - "ATen/ops/choose_qparams_optimized.h", - "ATen/ops/chunk.h", - "ATen/ops/clamp.h", - "ATen/ops/clamp_max.h", - "ATen/ops/clamp_min.h", - "ATen/ops/clip.h", - "ATen/ops/clone.h", - "ATen/ops/coalesce.h", - "ATen/ops/col2im.h", -// "ATen/ops/col2im_backward.h", - "ATen/ops/col_indices.h", - "ATen/ops/col_indices_copy.h", - "ATen/ops/column_stack.h", - "ATen/ops/combinations.h", - "ATen/ops/complex.h", - "ATen/ops/concat.h", - "ATen/ops/concatenate.h", - "ATen/ops/conj.h", - "ATen/ops/conj_physical.h", - "ATen/ops/constant_pad_nd.h", - "ATen/ops/contiguous.h", - "ATen/ops/conv1d.h", - "ATen/ops/conv2d.h", - "ATen/ops/conv3d.h", - "ATen/ops/conv_depthwise3d.h", - "ATen/ops/conv_tbc.h", - "ATen/ops/conv_tbc_backward.h", - "ATen/ops/conv_transpose1d.h", - "ATen/ops/conv_transpose2d.h", - "ATen/ops/conv_transpose3d.h", - "ATen/ops/convolution.h", - "ATen/ops/convolution_backward.h", - "ATen/ops/convolution_backward_overrideable.h", - 
"ATen/ops/convolution_overrideable.h", - "ATen/ops/copy.h", - "ATen/ops/copy_sparse_to_sparse.h", - "ATen/ops/copysign.h", - "ATen/ops/corrcoef.h", - "ATen/ops/cos.h", - "ATen/ops/cosh.h", - "ATen/ops/cosine_embedding_loss.h", - "ATen/ops/cosine_similarity.h", - "ATen/ops/count_nonzero.h", - "ATen/ops/cov.h", - "ATen/ops/cross.h", - "ATen/ops/cross_entropy_loss.h", - "ATen/ops/crow_indices.h", - "ATen/ops/crow_indices_copy.h", - "ATen/ops/ctc_loss.h", - "ATen/ops/cudnn_affine_grid_generator.h", - "ATen/ops/cudnn_affine_grid_generator_backward.h", - "ATen/ops/cudnn_batch_norm.h", - "ATen/ops/cudnn_batch_norm_backward.h", - "ATen/ops/cudnn_convolution.h", - "ATen/ops/cudnn_convolution_add_relu.h", - "ATen/ops/cudnn_convolution_relu.h", - "ATen/ops/cudnn_convolution_transpose.h", - "ATen/ops/cudnn_grid_sampler.h", - "ATen/ops/cudnn_grid_sampler_backward.h", - "ATen/ops/cudnn_is_acceptable.h", - "ATen/ops/cummax.h", - "ATen/ops/cummaxmin_backward.h", - "ATen/ops/cummin.h", - "ATen/ops/cumprod.h", - "ATen/ops/cumprod_backward.h", - "ATen/ops/cumsum.h", - "ATen/ops/cumulative_trapezoid.h", - "ATen/ops/data.h", - "ATen/ops/deg2rad.h", - "ATen/ops/dense_dim.h", - "ATen/ops/dequantize.h", - "ATen/ops/det.h", - "ATen/ops/detach.h", - "ATen/ops/detach_copy.h", - "ATen/ops/diag.h", -// "ATen/ops/diag_backward.h", - "ATen/ops/diag_embed.h", - "ATen/ops/diagflat.h", - "ATen/ops/diagonal.h", - "ATen/ops/diagonal_backward.h", - "ATen/ops/diagonal_copy.h", - "ATen/ops/diagonal_scatter.h", - "ATen/ops/diff.h", - "ATen/ops/digamma.h", - "ATen/ops/dist.h", - "ATen/ops/div.h", - "ATen/ops/divide.h", - "ATen/ops/dot.h", - "ATen/ops/dropout.h", - "ATen/ops/dsplit.h", - "ATen/ops/dstack.h", -// "ATen/ops/eig.h", - "ATen/ops/einsum.h", - "ATen/ops/elu.h", - "ATen/ops/elu_backward.h", - "ATen/ops/embedding.h", - "ATen/ops/embedding_backward.h", - "ATen/ops/embedding_bag.h", - "ATen/ops/embedding_dense_backward.h", - "ATen/ops/embedding_renorm.h", - "ATen/ops/embedding_sparse_backward.h", - "ATen/ops/empty.h", - "ATen/ops/empty_like.h", - "ATen/ops/empty_quantized.h", - "ATen/ops/empty_strided.h", - "ATen/ops/eq.h", - "ATen/ops/equal.h", - "ATen/ops/erf.h", - "ATen/ops/erfc.h", - "ATen/ops/erfinv.h", - "ATen/ops/exp.h", - "ATen/ops/exp2.h", - "ATen/ops/expand.h", - "ATen/ops/expand_as.h", - "ATen/ops/expand_copy.h", - "ATen/ops/expm1.h", - "ATen/ops/exponential.h", - "ATen/ops/eye.h", - "ATen/ops/fake_quantize_per_channel_affine.h", - "ATen/ops/fake_quantize_per_channel_affine_cachemask.h", - "ATen/ops/fake_quantize_per_channel_affine_cachemask_backward.h", - "ATen/ops/fake_quantize_per_tensor_affine.h", - "ATen/ops/fake_quantize_per_tensor_affine_cachemask.h", - "ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward.h", - "ATen/ops/fbgemm_linear_fp16_weight.h", - "ATen/ops/fbgemm_linear_fp16_weight_fp32_activation.h", - "ATen/ops/fbgemm_linear_int8_weight.h", - "ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h", - "ATen/ops/fbgemm_linear_quantize_weight.h", - "ATen/ops/fbgemm_pack_gemm_matrix_fp16.h", - "ATen/ops/fbgemm_pack_quantized_matrix.h", - "ATen/ops/feature_alpha_dropout.h", - "ATen/ops/feature_dropout.h", - "ATen/ops/fft_fft.h", - "ATen/ops/fft_fft2.h", - "ATen/ops/fft_fftfreq.h", - "ATen/ops/fft_fftn.h", - "ATen/ops/fft_fftshift.h", - "ATen/ops/fft_hfft.h", - "ATen/ops/fft_hfft2.h", - "ATen/ops/fft_hfftn.h", - "ATen/ops/fft_ifft.h", - "ATen/ops/fft_ifft2.h", - "ATen/ops/fft_ifftn.h", - "ATen/ops/fft_ifftshift.h", - "ATen/ops/fft_ihfft.h", - "ATen/ops/fft_ihfft2.h", - "ATen/ops/fft_ihfftn.h", - 
"ATen/ops/fft_irfft.h", - "ATen/ops/fft_irfft2.h", - "ATen/ops/fft_irfftn.h", - "ATen/ops/fft_rfft.h", - "ATen/ops/fft_rfft2.h", - "ATen/ops/fft_rfftfreq.h", - "ATen/ops/fft_rfftn.h", - "ATen/ops/fill.h", - "ATen/ops/fill_diagonal.h", - "ATen/ops/fix.h", - "ATen/ops/flatten.h", - "ATen/ops/flatten_dense_tensors.h", - "ATen/ops/flip.h", - "ATen/ops/fliplr.h", - "ATen/ops/flipud.h", - "ATen/ops/float_power.h", - "ATen/ops/floor.h", - "ATen/ops/floor_divide.h", - "ATen/ops/fmax.h", - "ATen/ops/fmin.h", - "ATen/ops/fmod.h", - "ATen/ops/frac.h", - "ATen/ops/fractional_max_pool2d.h", - "ATen/ops/fractional_max_pool2d_backward.h", - "ATen/ops/fractional_max_pool3d.h", - "ATen/ops/fractional_max_pool3d_backward.h", - "ATen/ops/frexp.h", - "ATen/ops/frobenius_norm.h", - "ATen/ops/from_file.h", - "ATen/ops/full.h", - "ATen/ops/full_like.h", - "ATen/ops/fused_moving_avg_obs_fake_quant.h", - "ATen/ops/gather.h", - "ATen/ops/gather_backward.h", - "ATen/ops/gcd.h", - "ATen/ops/ge.h", - "ATen/ops/gelu.h", - "ATen/ops/gelu_backward.h", - "ATen/ops/geometric.h", - "ATen/ops/geqrf.h", - "ATen/ops/ger.h", - "ATen/ops/glu.h", - "ATen/ops/glu_backward.h", - "ATen/ops/glu_backward_jvp.h", - "ATen/ops/glu_jvp.h", - "ATen/ops/gradient.h", - "ATen/ops/greater.h", - "ATen/ops/greater_equal.h", - "ATen/ops/grid_sampler.h", - "ATen/ops/grid_sampler_2d.h", - "ATen/ops/grid_sampler_2d_backward.h", - "ATen/ops/grid_sampler_3d.h", - "ATen/ops/grid_sampler_3d_backward.h", - "ATen/ops/group_norm.h", - "ATen/ops/gru.h", - "ATen/ops/gru_cell.h", - "ATen/ops/gt.h", - "ATen/ops/hamming_window.h", - "ATen/ops/hann_window.h", - "ATen/ops/hardshrink.h", - "ATen/ops/hardshrink_backward.h", - "ATen/ops/hardsigmoid.h", - "ATen/ops/hardsigmoid_backward.h", - "ATen/ops/hardswish.h", - "ATen/ops/hardswish_backward.h", - "ATen/ops/hardtanh.h", - "ATen/ops/hardtanh_backward.h", - "ATen/ops/heaviside.h", - "ATen/ops/hinge_embedding_loss.h", - "ATen/ops/histc.h", - "ATen/ops/histogram.h", - "ATen/ops/histogramdd.h", - "ATen/ops/hsplit.h", - "ATen/ops/hspmm.h", - "ATen/ops/hstack.h", - "ATen/ops/huber_loss.h", - "ATen/ops/huber_loss_backward.h", - "ATen/ops/hypot.h", - "ATen/ops/i0.h", - "ATen/ops/igamma.h", - "ATen/ops/igammac.h", - "ATen/ops/im2col.h", -// "ATen/ops/im2col_backward.h", - "ATen/ops/imag.h", - "ATen/ops/index.h", - "ATen/ops/index_add.h", - "ATen/ops/index_copy.h", - "ATen/ops/index_fill.h", - "ATen/ops/index_put.h", - "ATen/ops/index_reduce.h", - "ATen/ops/index_select.h", - "ATen/ops/index_select_backward.h", - "ATen/ops/indices.h", - "ATen/ops/indices_copy.h", - "ATen/ops/infinitely_differentiable_gelu_backward.h", - "ATen/ops/inner.h", - "ATen/ops/instance_norm.h", - "ATen/ops/int_repr.h", - "ATen/ops/inverse.h", - "ATen/ops/is_coalesced.h", - "ATen/ops/is_complex.h", - "ATen/ops/is_conj.h", - "ATen/ops/is_distributed.h", - "ATen/ops/is_floating_point.h", - "ATen/ops/is_inference.h", - "ATen/ops/is_leaf.h", - "ATen/ops/is_neg.h", - "ATen/ops/is_nonzero.h", - "ATen/ops/is_pinned.h", - "ATen/ops/is_same_size.h", - "ATen/ops/is_set_to.h", - "ATen/ops/is_signed.h", -// "ATen/ops/is_vulkan_available.h", - "ATen/ops/isclose.h", - "ATen/ops/isfinite.h", - "ATen/ops/isin.h", - "ATen/ops/isinf.h", - "ATen/ops/isnan.h", - "ATen/ops/isneginf.h", - "ATen/ops/isposinf.h", - "ATen/ops/isreal.h", - "ATen/ops/istft.h", - "ATen/ops/item.h", - "ATen/ops/kaiser_window.h", - "ATen/ops/kl_div.h", -// "ATen/ops/kl_div_backward.h", - "ATen/ops/kron.h", - "ATen/ops/kthvalue.h", - "ATen/ops/l1_loss.h", -// "ATen/ops/l1_loss_backward.h", - 
"ATen/ops/layer_norm.h", - "ATen/ops/lcm.h", - "ATen/ops/ldexp.h", - "ATen/ops/le.h", - "ATen/ops/leaky_relu.h", - "ATen/ops/leaky_relu_backward.h", - "ATen/ops/lerp.h", - "ATen/ops/less.h", - "ATen/ops/less_equal.h", - "ATen/ops/lgamma.h", - "ATen/ops/lift.h", - "ATen/ops/lift_fresh.h", - "ATen/ops/lift_fresh_copy.h", - "ATen/ops/linalg_cholesky.h", - "ATen/ops/linalg_cholesky_ex.h", - "ATen/ops/linalg_cond.h", - "ATen/ops/linalg_cross.h", - "ATen/ops/linalg_det.h", - "ATen/ops/linalg_diagonal.h", - "ATen/ops/linalg_eig.h", - "ATen/ops/linalg_eigh.h", - "ATen/ops/linalg_eigvals.h", - "ATen/ops/linalg_eigvalsh.h", - "ATen/ops/linalg_householder_product.h", - "ATen/ops/linalg_inv.h", - "ATen/ops/linalg_inv_ex.h", - "ATen/ops/linalg_ldl_factor.h", - "ATen/ops/linalg_ldl_factor_ex.h", - "ATen/ops/linalg_ldl_solve.h", - "ATen/ops/linalg_lstsq.h", - "ATen/ops/linalg_lu.h", - "ATen/ops/linalg_lu_factor.h", - "ATen/ops/linalg_lu_factor_ex.h", - "ATen/ops/linalg_lu_solve.h", - "ATen/ops/linalg_matmul.h", - "ATen/ops/linalg_matrix_exp.h", - "ATen/ops/linalg_matrix_norm.h", - "ATen/ops/linalg_matrix_power.h", - "ATen/ops/linalg_matrix_rank.h", - "ATen/ops/linalg_multi_dot.h", - "ATen/ops/linalg_norm.h", - "ATen/ops/linalg_pinv.h", - "ATen/ops/linalg_qr.h", - "ATen/ops/linalg_slogdet.h", - "ATen/ops/linalg_solve.h", - "ATen/ops/linalg_solve_ex.h", - "ATen/ops/linalg_solve_triangular.h", - "ATen/ops/linalg_svd.h", - "ATen/ops/linalg_svdvals.h", - "ATen/ops/linalg_tensorinv.h", - "ATen/ops/linalg_tensorsolve.h", - "ATen/ops/linalg_vander.h", - "ATen/ops/linalg_vecdot.h", - "ATen/ops/linalg_vector_norm.h", - "ATen/ops/linear.h", - "ATen/ops/linear_backward.h", - "ATen/ops/linspace.h", - "ATen/ops/log.h", - "ATen/ops/log10.h", - "ATen/ops/log1p.h", - "ATen/ops/log2.h", - "ATen/ops/log_normal.h", - "ATen/ops/log_sigmoid.h", - "ATen/ops/log_sigmoid_backward.h", - "ATen/ops/log_sigmoid_forward.h", - "ATen/ops/log_softmax.h", - "ATen/ops/logaddexp.h", - "ATen/ops/logaddexp2.h", - "ATen/ops/logcumsumexp.h", - "ATen/ops/logdet.h", - "ATen/ops/logical_and.h", - "ATen/ops/logical_not.h", - "ATen/ops/logical_or.h", - "ATen/ops/logical_xor.h", - "ATen/ops/logit.h", - "ATen/ops/logit_backward.h", - "ATen/ops/logspace.h", - "ATen/ops/logsumexp.h", - "ATen/ops/lshift.h", - "ATen/ops/lstm.h", - "ATen/ops/lstm_cell.h", - "ATen/ops/lstm_mps_backward.h", -// "ATen/ops/lstsq.h", - "ATen/ops/lt.h", - "ATen/ops/lu_solve.h", - "ATen/ops/lu_unpack.h", - "ATen/ops/mH.h", - "ATen/ops/mT.h", - "ATen/ops/margin_ranking_loss.h", - "ATen/ops/masked_fill.h", - "ATen/ops/masked_scatter.h", - "ATen/ops/masked_select.h", - "ATen/ops/masked_select_backward.h", - "ATen/ops/matmul.h", - "ATen/ops/matmul_backward.h", - "ATen/ops/matrix_H.h", - "ATen/ops/matrix_exp.h", - "ATen/ops/matrix_exp_backward.h", - "ATen/ops/matrix_power.h", -// "ATen/ops/matrix_rank.h", - "ATen/ops/max.h", - "ATen/ops/max_pool1d.h", - "ATen/ops/max_pool1d_with_indices.h", - "ATen/ops/max_pool2d.h", - "ATen/ops/max_pool2d_backward.h", - "ATen/ops/max_pool2d_with_indices.h", - "ATen/ops/max_pool2d_with_indices_backward.h", - "ATen/ops/max_pool3d.h", - "ATen/ops/max_pool3d_with_indices.h", - "ATen/ops/max_pool3d_with_indices_backward.h", - "ATen/ops/max_unpool2d.h", -// "ATen/ops/max_unpool2d_backward.h", - "ATen/ops/max_unpool3d.h", -// "ATen/ops/max_unpool3d_backward.h", - "ATen/ops/maximum.h", - "ATen/ops/mean.h", - "ATen/ops/median.h", - "ATen/ops/meshgrid.h", - "ATen/ops/min.h", - "ATen/ops/minimum.h", - "ATen/ops/miopen_batch_norm.h", - 
"ATen/ops/miopen_batch_norm_backward.h", - "ATen/ops/miopen_convolution.h", - "ATen/ops/miopen_convolution_add_relu.h", - "ATen/ops/miopen_convolution_relu.h", - "ATen/ops/miopen_convolution_transpose.h", - "ATen/ops/miopen_depthwise_convolution.h", - "ATen/ops/miopen_rnn.h", - "ATen/ops/miopen_rnn_backward.h", - "ATen/ops/mish.h", - "ATen/ops/mish_backward.h", - "ATen/ops/mkldnn_adaptive_avg_pool2d.h", - "ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h", - "ATen/ops/mkldnn_convolution.h", - "ATen/ops/mkldnn_linear.h", - "ATen/ops/mkldnn_linear_backward.h", - "ATen/ops/mkldnn_linear_backward_input.h", - "ATen/ops/mkldnn_linear_backward_weights.h", - "ATen/ops/mkldnn_max_pool2d.h", - "ATen/ops/mkldnn_max_pool2d_backward.h", - "ATen/ops/mkldnn_max_pool3d.h", - "ATen/ops/mkldnn_max_pool3d_backward.h", - "ATen/ops/mkldnn_reorder_conv2d_weight.h", - "ATen/ops/mkldnn_reorder_conv3d_weight.h", - "ATen/ops/mkldnn_rnn_layer.h", - "ATen/ops/mkldnn_rnn_layer_backward.h", - "ATen/ops/mm.h", - "ATen/ops/mode.h", - "ATen/ops/moveaxis.h", - "ATen/ops/movedim.h", - "ATen/ops/mps_convolution_backward.h", - "ATen/ops/mps_convolution_transpose_backward.h", -// "ATen/ops/mps_linear_backward.h", -// "ATen/ops/mps_max_pool2d_backward.h", - "ATen/ops/mse_loss.h", - "ATen/ops/mse_loss_backward.h", - "ATen/ops/msort.h", - "ATen/ops/mul.h", - "ATen/ops/multi_margin_loss.h", - "ATen/ops/multi_margin_loss_backward.h", - "ATen/ops/multilabel_margin_loss.h", - "ATen/ops/multilabel_margin_loss_backward.h", - "ATen/ops/multilabel_margin_loss_forward.h", - "ATen/ops/multinomial.h", - "ATen/ops/multiply.h", - "ATen/ops/mv.h", - "ATen/ops/mvlgamma.h", - "ATen/ops/nan_to_num.h", - "ATen/ops/nanmean.h", - "ATen/ops/nanmedian.h", - "ATen/ops/nanquantile.h", - "ATen/ops/nansum.h", - "ATen/ops/narrow.h", - "ATen/ops/narrow_copy.h", - "ATen/ops/native_batch_norm.h", - "ATen/ops/native_batch_norm_backward.h", - "ATen/ops/native_channel_shuffle.h", - "ATen/ops/native_dropout.h", - "ATen/ops/native_dropout_backward.h", - "ATen/ops/native_group_norm.h", - "ATen/ops/native_group_norm_backward.h", - "ATen/ops/native_layer_norm.h", - "ATen/ops/native_layer_norm_backward.h", - "ATen/ops/native_norm.h", - "ATen/ops/ne.h", - "ATen/ops/neg.h", - "ATen/ops/negative.h", -// "ATen/ops/nested_tensor.h", - "ATen/ops/nested_to_padded_tensor.h", - "ATen/ops/new_empty.h", - "ATen/ops/new_empty_strided.h", - "ATen/ops/new_full.h", - "ATen/ops/new_ones.h", - "ATen/ops/new_zeros.h", - "ATen/ops/nextafter.h", - "ATen/ops/nll_loss.h", - "ATen/ops/nll_loss2d.h", - "ATen/ops/nll_loss2d_backward.h", - "ATen/ops/nll_loss2d_forward.h", - "ATen/ops/nll_loss_backward.h", - "ATen/ops/nll_loss_forward.h", - "ATen/ops/nll_loss_nd.h", - "ATen/ops/nonzero.h", - "ATen/ops/nonzero_numpy.h", - "ATen/ops/norm.h", - "ATen/ops/norm_except_dim.h", - "ATen/ops/normal.h", - "ATen/ops/not_equal.h", - "ATen/ops/nuclear_norm.h", - "ATen/ops/numpy_T.h", - "ATen/ops/one_hot.h", - "ATen/ops/ones.h", - "ATen/ops/ones_like.h", - "ATen/ops/or.h", - "ATen/ops/orgqr.h", - "ATen/ops/ormqr.h", - "ATen/ops/outer.h", - "ATen/ops/output_nr.h", - "ATen/ops/pad.h", - "ATen/ops/pad_sequence.h", - "ATen/ops/pairwise_distance.h", - "ATen/ops/pdist.h", - "ATen/ops/permute.h", - "ATen/ops/permute_copy.h", - "ATen/ops/pin_memory.h", - "ATen/ops/pinverse.h", - "ATen/ops/pixel_shuffle.h", - "ATen/ops/pixel_unshuffle.h", - "ATen/ops/poisson.h", - "ATen/ops/poisson_nll_loss.h", - "ATen/ops/polar.h", - "ATen/ops/polygamma.h", - "ATen/ops/positive.h", - "ATen/ops/pow.h", - "ATen/ops/prelu.h", -// 
"ATen/ops/prelu_backward.h", - "ATen/ops/prod.h", - "ATen/ops/promote_types.h", - "ATen/ops/put.h", - "ATen/ops/q_per_channel_axis.h", - "ATen/ops/q_per_channel_scales.h", - "ATen/ops/q_per_channel_zero_points.h", - "ATen/ops/q_scale.h", - "ATen/ops/q_zero_point.h", - "ATen/ops/qr.h", - "ATen/ops/qscheme.h", - "ATen/ops/quantile.h", - "ATen/ops/quantize_per_channel.h", - "ATen/ops/quantize_per_tensor.h", - "ATen/ops/quantize_per_tensor_dynamic.h", - "ATen/ops/quantized_batch_norm.h", - "ATen/ops/quantized_gru_cell.h", - "ATen/ops/quantized_lstm_cell.h", - "ATen/ops/quantized_max_pool1d.h", - "ATen/ops/quantized_max_pool2d.h", - "ATen/ops/quantized_rnn_relu_cell.h", - "ATen/ops/quantized_rnn_tanh_cell.h", - "ATen/ops/rad2deg.h", - "ATen/ops/rand.h", - "ATen/ops/rand_like.h", - "ATen/ops/randint.h", - "ATen/ops/randint_like.h", - "ATen/ops/randn.h", - "ATen/ops/randn_like.h", - "ATen/ops/random.h", - "ATen/ops/randperm.h", - "ATen/ops/range.h", - "ATen/ops/ravel.h", - "ATen/ops/real.h", - "ATen/ops/reciprocal.h", - "ATen/ops/record_stream.h", - "ATen/ops/refine_names.h", - "ATen/ops/reflection_pad1d.h", - "ATen/ops/reflection_pad1d_backward.h", - "ATen/ops/reflection_pad2d.h", - "ATen/ops/reflection_pad2d_backward.h", - "ATen/ops/reflection_pad3d.h", - "ATen/ops/reflection_pad3d_backward.h", - "ATen/ops/relu.h", - "ATen/ops/relu6.h", - "ATen/ops/remainder.h", - "ATen/ops/rename.h", - "ATen/ops/renorm.h", - "ATen/ops/repeat.h", - "ATen/ops/repeat_interleave.h", - "ATen/ops/replication_pad1d.h", - "ATen/ops/replication_pad1d_backward.h", - "ATen/ops/replication_pad2d.h", - "ATen/ops/replication_pad2d_backward.h", - "ATen/ops/replication_pad3d.h", - "ATen/ops/replication_pad3d_backward.h", - "ATen/ops/requires_grad.h", - "ATen/ops/reshape.h", - "ATen/ops/reshape_as.h", - "ATen/ops/resize.h", - "ATen/ops/resize_as.h", - "ATen/ops/resize_as_sparse.h", - "ATen/ops/resolve_conj.h", - "ATen/ops/resolve_neg.h", - "ATen/ops/result_type.h", - "ATen/ops/retain_grad.h", - "ATen/ops/retains_grad.h", - "ATen/ops/rnn_relu.h", - "ATen/ops/rnn_relu_cell.h", - "ATen/ops/rnn_tanh.h", - "ATen/ops/rnn_tanh_cell.h", - "ATen/ops/roll.h", - "ATen/ops/rot90.h", - "ATen/ops/round.h", - "ATen/ops/row_indices.h", - "ATen/ops/row_indices_copy.h", - "ATen/ops/row_stack.h", - "ATen/ops/rrelu.h", - "ATen/ops/rrelu_with_noise.h", - "ATen/ops/rrelu_with_noise_backward.h", - "ATen/ops/rshift.h", - "ATen/ops/rsqrt.h", - "ATen/ops/rsub.h", - "ATen/ops/scalar_tensor.h", - "ATen/ops/scaled_dot_product_attention.h", - "ATen/ops/scatter.h", - "ATen/ops/scatter_add.h", - "ATen/ops/scatter_reduce.h", - "ATen/ops/searchsorted.h", - "ATen/ops/segment_reduce.h", - "ATen/ops/select.h", - "ATen/ops/select_backward.h", - "ATen/ops/select_copy.h", - "ATen/ops/select_scatter.h", - "ATen/ops/selu.h", - "ATen/ops/set.h", - "ATen/ops/set_data.h", - "ATen/ops/sgn.h", - "ATen/ops/sigmoid.h", - "ATen/ops/sigmoid_backward.h", - "ATen/ops/sign.h", - "ATen/ops/signbit.h", - "ATen/ops/silu.h", - "ATen/ops/silu_backward.h", - "ATen/ops/sin.h", - "ATen/ops/sinc.h", - "ATen/ops/sinh.h", - "ATen/ops/size.h", - "ATen/ops/slice.h", - "ATen/ops/slice_backward.h", - "ATen/ops/slice_copy.h", - "ATen/ops/slice_scatter.h", - "ATen/ops/slogdet.h", - "ATen/ops/slow_conv3d.h", - "ATen/ops/slow_conv3d_forward.h", - "ATen/ops/slow_conv_dilated2d.h", - "ATen/ops/slow_conv_dilated3d.h", - "ATen/ops/slow_conv_transpose2d.h", - "ATen/ops/slow_conv_transpose3d.h", - "ATen/ops/smm.h", - "ATen/ops/smooth_l1_loss.h", - "ATen/ops/smooth_l1_loss_backward.h", - 
"ATen/ops/soft_margin_loss.h", - "ATen/ops/soft_margin_loss_backward.h", - "ATen/ops/softmax.h", - "ATen/ops/softplus.h", - "ATen/ops/softplus_backward.h", - "ATen/ops/softshrink.h", - "ATen/ops/softshrink_backward.h", -// "ATen/ops/solve.h", - "ATen/ops/sort.h", - "ATen/ops/sparse_bsc_tensor.h", - "ATen/ops/sparse_bsr_tensor.h", - "ATen/ops/sparse_compressed_tensor.h", - "ATen/ops/sparse_coo_tensor.h", - "ATen/ops/sparse_csc_tensor.h", - "ATen/ops/sparse_csr_tensor.h", - "ATen/ops/sparse_dim.h", - "ATen/ops/sparse_mask.h", - "ATen/ops/sparse_resize.h", - "ATen/ops/sparse_resize_and_clear.h", - "ATen/ops/sparse_sampled_addmm.h", - "ATen/ops/special_airy_ai.h", - "ATen/ops/special_bessel_j0.h", - "ATen/ops/special_bessel_j1.h", - "ATen/ops/special_bessel_y0.h", - "ATen/ops/special_bessel_y1.h", - "ATen/ops/special_chebyshev_polynomial_t.h", - "ATen/ops/special_chebyshev_polynomial_u.h", - "ATen/ops/special_chebyshev_polynomial_v.h", - "ATen/ops/special_chebyshev_polynomial_w.h", - "ATen/ops/special_digamma.h", - "ATen/ops/special_entr.h", - "ATen/ops/special_erf.h", - "ATen/ops/special_erfc.h", - "ATen/ops/special_erfcx.h", - "ATen/ops/special_erfinv.h", - "ATen/ops/special_exp2.h", - "ATen/ops/special_expit.h", - "ATen/ops/special_expm1.h", - "ATen/ops/special_gammainc.h", - "ATen/ops/special_gammaincc.h", - "ATen/ops/special_gammaln.h", - "ATen/ops/special_hermite_polynomial_h.h", - "ATen/ops/special_hermite_polynomial_he.h", - "ATen/ops/special_i0.h", - "ATen/ops/special_i0e.h", - "ATen/ops/special_i1.h", - "ATen/ops/special_i1e.h", - "ATen/ops/special_laguerre_polynomial_l.h", - "ATen/ops/special_legendre_polynomial_p.h", - "ATen/ops/special_log1p.h", - "ATen/ops/special_log_ndtr.h", - "ATen/ops/special_log_softmax.h", - "ATen/ops/special_logit.h", - "ATen/ops/special_logsumexp.h", - "ATen/ops/special_modified_bessel_i0.h", - "ATen/ops/special_modified_bessel_i1.h", - "ATen/ops/special_modified_bessel_k0.h", - "ATen/ops/special_modified_bessel_k1.h", - "ATen/ops/special_multigammaln.h", - "ATen/ops/special_ndtr.h", - "ATen/ops/special_ndtri.h", - "ATen/ops/special_polygamma.h", - "ATen/ops/special_psi.h", - "ATen/ops/special_round.h", - "ATen/ops/special_scaled_modified_bessel_k0.h", - "ATen/ops/special_scaled_modified_bessel_k1.h", - "ATen/ops/special_shifted_chebyshev_polynomial_t.h", - "ATen/ops/special_shifted_chebyshev_polynomial_u.h", - "ATen/ops/special_shifted_chebyshev_polynomial_v.h", - "ATen/ops/special_shifted_chebyshev_polynomial_w.h", - "ATen/ops/special_sinc.h", - "ATen/ops/special_softmax.h", - "ATen/ops/special_spherical_bessel_j0.h", - "ATen/ops/special_xlog1py.h", - "ATen/ops/special_xlogy.h", - "ATen/ops/special_zeta.h", - "ATen/ops/split.h", - "ATen/ops/split_copy.h", - "ATen/ops/split_with_sizes.h", - "ATen/ops/split_with_sizes_copy.h", - "ATen/ops/sqrt.h", - "ATen/ops/square.h", - "ATen/ops/squeeze.h", - "ATen/ops/squeeze_copy.h", - "ATen/ops/sspaddmm.h", - "ATen/ops/stack.h", - "ATen/ops/std.h", - "ATen/ops/std_mean.h", - "ATen/ops/stft.h", - "ATen/ops/stride.h", - "ATen/ops/sub.h", - "ATen/ops/subtract.h", - "ATen/ops/sum.h", - "ATen/ops/sum_to_size.h", - "ATen/ops/svd.h", - "ATen/ops/swapaxes.h", - "ATen/ops/swapdims.h", -// "ATen/ops/symeig.h", - "ATen/ops/t.h", - "ATen/ops/t_copy.h", - "ATen/ops/take.h", - "ATen/ops/take_along_dim.h", - "ATen/ops/tan.h", - "ATen/ops/tanh.h", - "ATen/ops/tanh_backward.h", - "ATen/ops/tensor_split.h", - "ATen/ops/tensordot.h", - "ATen/ops/thnn_conv2d.h", - "ATen/ops/threshold.h", - "ATen/ops/threshold_backward.h", - 
"ATen/ops/tile.h", - "ATen/ops/to.h", - "ATen/ops/to_dense.h", - "ATen/ops/to_dense_backward.h", - "ATen/ops/to_mkldnn.h", - "ATen/ops/to_mkldnn_backward.h", - "ATen/ops/to_padded_tensor.h", - "ATen/ops/to_sparse.h", - "ATen/ops/to_sparse_bsc.h", - "ATen/ops/to_sparse_bsr.h", - "ATen/ops/to_sparse_csc.h", - "ATen/ops/to_sparse_csr.h", - "ATen/ops/topk.h", - "ATen/ops/trace.h", - "ATen/ops/trace_backward.h", - "ATen/ops/transpose.h", - "ATen/ops/transpose_copy.h", - "ATen/ops/trapezoid.h", - "ATen/ops/trapz.h", - "ATen/ops/triangular_solve.h", - "ATen/ops/tril.h", - "ATen/ops/tril_indices.h", - "ATen/ops/triplet_margin_loss.h", - "ATen/ops/triu.h", - "ATen/ops/triu_indices.h", - "ATen/ops/true_divide.h", - "ATen/ops/trunc.h", - "ATen/ops/type_as.h", - "ATen/ops/unbind.h", - "ATen/ops/unbind_copy.h", - "ATen/ops/unflatten.h", - "ATen/ops/unflatten_dense_tensors.h", - "ATen/ops/unfold.h", - "ATen/ops/unfold_backward.h", - "ATen/ops/unfold_copy.h", - "ATen/ops/uniform.h", - "ATen/ops/unique_consecutive.h", - "ATen/ops/unique_dim.h", - "ATen/ops/unique_dim_consecutive.h", - "ATen/ops/unsafe_chunk.h", - "ATen/ops/unsafe_split.h", - "ATen/ops/unsafe_split_with_sizes.h", - "ATen/ops/unsqueeze.h", - "ATen/ops/unsqueeze_copy.h", - "ATen/ops/upsample_bicubic2d.h", - "ATen/ops/upsample_bicubic2d_backward.h", - "ATen/ops/upsample_bilinear2d.h", - "ATen/ops/upsample_bilinear2d_backward.h", - "ATen/ops/upsample_linear1d.h", - "ATen/ops/upsample_linear1d_backward.h", - "ATen/ops/upsample_nearest1d.h", - "ATen/ops/upsample_nearest1d_backward.h", - "ATen/ops/upsample_nearest2d.h", - "ATen/ops/upsample_nearest2d_backward.h", - "ATen/ops/upsample_nearest3d.h", - "ATen/ops/upsample_nearest3d_backward.h", - "ATen/ops/upsample_trilinear3d.h", - "ATen/ops/upsample_trilinear3d_backward.h", - "ATen/ops/value_selecting_reduction_backward.h", - "ATen/ops/values.h", - "ATen/ops/values_copy.h", - "ATen/ops/vander.h", - "ATen/ops/var.h", - "ATen/ops/var_mean.h", - "ATen/ops/vdot.h", - "ATen/ops/view.h", - "ATen/ops/view_as.h", - "ATen/ops/view_as_complex.h", - "ATen/ops/view_as_complex_copy.h", - "ATen/ops/view_as_real.h", - "ATen/ops/view_as_real_copy.h", - "ATen/ops/view_copy.h", - "ATen/ops/vsplit.h", - "ATen/ops/vstack.h", - "ATen/ops/where.h", - "ATen/ops/xlogy.h", - "ATen/ops/xor.h", - "ATen/ops/zero.h", - "ATen/ops/zeros.h", - "ATen/ops/zeros_like.h", - "ATen/ops/values.h", - "ATen/ops/vander.h", - "ATen/ops/var.h", - "ATen/ops/var_mean.h", - "ATen/ops/vdot.h", - "ATen/ops/view.h", - "ATen/ops/view_as.h", - "ATen/ops/view_as_complex.h", - "ATen/ops/view_as_real.h", - "ATen/ops/vsplit.h", - "ATen/ops/vstack.h", - "ATen/ops/where.h", - "ATen/ops/xlogy.h", - "ATen/ops/xor.h", - "ATen/ops/zero.h", - "ATen/ops/zeros.h", - "ATen/ops/zeros_like.h", - - "torch/autograd.h", -// "torch/library.h", -// "torch/custom_class.h", - "torch/script.h", - "torch/csrc/Export.h", - "torch/csrc/onnx/onnx.h", -// "torch/csrc/WindowsTorchApiMacro.h", - "torch/csrc/api/include/torch/imethod.h", - "torch/csrc/api/include/torch/types.h", - "torch/csrc/api/include/torch/cuda.h", - "torch/csrc/api/include/torch/ordered_dict.h", -// "torch/csrc/api/include/torch/detail/TensorDataContainer.h", -// "torch/csrc/utils/disallow_copy.h", - "torch/csrc/utils/memory.h", - "torch/csrc/utils/python_stub.h", -// "torch/csrc/utils/object_ptr.h", - "torch/csrc/utils/schema_info.h", - "torch/csrc/utils/variadic.h", - "torch/csrc/autograd/utils/warnings.h", - "torch/csrc/autograd/anomaly_mode.h", - "torch/csrc/autograd/edge.h", - 
"torch/csrc/autograd/grad_mode.h", - "torch/csrc/autograd/InferenceMode.h", -// "torch/csrc/autograd/input_buffer.h", - "torch/csrc/autograd/input_metadata.h", - "torch/csrc/autograd/function_hook.h", -// "torch/csrc/autograd/graph_task.h", -// "torch/csrc/autograd/cpp_hook.h", - "torch/csrc/autograd/profiler.h", - "torch/csrc/autograd/saved_variable_hooks.h", - "torch/csrc/autograd/saved_variable.h", - "torch/csrc/autograd/forward_grad.h", - "torch/csrc/autograd/variable.h", - "torch/csrc/autograd/function.h", - "torch/csrc/autograd/custom_function.h", - "torch/csrc/autograd/autograd.h", -// "torch/csrc/autograd/generated/Functions.h", - "torch/csrc/autograd/generated/VariableType.h", - "torch/csrc/autograd/generated/variable_factories.h", - "torch/csrc/jit/frontend/function_schema_parser.h", - "torch/csrc/jit/frontend/name_mangler.h", - "torch/csrc/jit/frontend/parser_constants.h", - "torch/csrc/jit/frontend/source_range.h", - "torch/csrc/jit/frontend/sugared_value.h", - "torch/csrc/jit/frontend/resolver.h", - "torch/csrc/jit/frontend/tracer.h", - "torch/csrc/jit/frontend/lexer.h", - "torch/csrc/jit/frontend/strtod.h", - "torch/csrc/jit/frontend/tree.h", - "torch/csrc/jit/frontend/error_report.h", - "torch/csrc/jit/frontend/tree_views.h", - "torch/csrc/jit/ir/attributes.h", - "torch/csrc/jit/ir/constants.h", - "torch/csrc/jit/ir/graph_node_list.h", - "torch/csrc/jit/ir/named_value.h", - "torch/csrc/jit/ir/scope.h", - "torch/csrc/jit/ir/ir.h", - "torch/csrc/jit/ir/type_hashing.h", - "torch/csrc/jit/passes/shape_analysis.h", - "torch/csrc/jit/python/update_graph_executor_opt.h", - "torch/csrc/jit/runtime/argument_spec.h", - "torch/csrc/jit/runtime/instruction.h", - "torch/csrc/jit/runtime/interpreter.h", -// "torch/csrc/jit/runtime/variable_tensor_list.h", - "torch/csrc/jit/runtime/graph_executor.h", - "torch/csrc/jit/runtime/operator_options.h", - "torch/csrc/jit/runtime/operator.h", - "torch/csrc/jit/runtime/custom_operator.h", - "torch/csrc/jit/api/compilation_unit.h", - "torch/csrc/jit/api/function_impl.h", - "torch/csrc/jit/api/method.h", - "torch/csrc/jit/api/object.h", - "torch/csrc/jit/api/module.h", - "torch/csrc/jit/serialization/source_range_serialization.h", - "torch/csrc/jit/serialization/pickler.h", - "torch/csrc/jit/serialization/unpickler.h", - "torch/csrc/jit/serialization/import.h", - "torch/csrc/jit/serialization/pickle.h", - "torch/csrc/jit/serialization/python_print.h", - "torch/csrc/jit/serialization/type_name_uniquer.h", + "torch/torch.h", + "ATen/native/TensorShape.h", "torch/csrc/jit/serialization/storage_context.h", - "torch/csrc/jit/serialization/export.h", - - "torch/arg.h", - "torch/enum.h", - "torch/types.h", - "torch/utils.h", - - "torch/data.h", - "torch/data/example.h", - "torch/data/iterator.h", - "torch/data/worker_exception.h", - "torch/data/dataloader.h", - "torch/data/dataloader/base.h", - "torch/data/dataloader_options.h", - "torch/data/dataloader/stateful.h", - "torch/data/dataloader/stateless.h", - "torch/data/datasets.h", - "torch/data/datasets/base.h", - "torch/data/datasets/chunk.h", - "torch/data/datasets/map.h", - "torch/data/datasets/mnist.h", - "torch/data/datasets/shared.h", - "torch/data/datasets/stateful.h", - "torch/data/datasets/tensor.h", - "torch/data/samplers.h", - "torch/data/samplers/base.h", - "torch/data/samplers/custom_batch_request.h", - "torch/data/samplers/distributed.h", - "torch/data/samplers/random.h", - "torch/data/samplers/sequential.h", - "torch/data/samplers/serialize.h", - "torch/data/samplers/stream.h", - 
"torch/data/transforms.h", - "torch/data/transforms/base.h", - "torch/data/transforms/collate.h", - "torch/data/transforms/lambda.h", - "torch/data/transforms/stack.h", - "torch/data/transforms/tensor.h", - - "torch/serialize.h", - "torch/serialize/archive.h", - "torch/serialize/input-archive.h", - "torch/serialize/output-archive.h", - "torch/serialize/tensor.h", - - "torch/nn.h", - "torch/nn/cloneable.h", - "torch/nn/init.h", - "torch/nn/pimpl.h", - "torch/nn/utils.h", - "torch/nn/utils/clip_grad.h", - "torch/nn/utils/convert_parameters.h", - "torch/nn/utils/rnn.h", - - "torch/nn/options.h", - "torch/nn/options/activation.h", - "torch/nn/options/adaptive.h", - "torch/nn/options/batchnorm.h", - "torch/nn/options/conv.h", - "torch/nn/options/distance.h", - "torch/nn/options/dropout.h", - "torch/nn/options/embedding.h", - "torch/nn/options/fold.h", - "torch/nn/options/linear.h", - "torch/nn/options/loss.h", - "torch/nn/options/normalization.h", - "torch/nn/options/padding.h", - "torch/nn/options/pixelshuffle.h", - "torch/nn/options/pooling.h", - "torch/nn/options/rnn.h", - "torch/nn/options/upsampling.h", - "torch/nn/options/vision.h", - "torch/nn/options/instancenorm.h", - "torch/nn/options/transformerlayer.h", - "torch/nn/options/transformercoder.h", - "torch/nn/options/transformer.h", - - "torch/nn/functional.h", - "torch/nn/functional/activation.h", - "torch/nn/functional/batchnorm.h", - "torch/nn/functional/conv.h", - "torch/nn/functional/distance.h", - "torch/nn/functional/dropout.h", - "torch/nn/functional/embedding.h", - "torch/nn/functional/fold.h", - "torch/nn/functional/linear.h", - "torch/nn/functional/loss.h", - "torch/nn/functional/normalization.h", - "torch/nn/functional/padding.h", - "torch/nn/functional/pixelshuffle.h", - "torch/nn/functional/pooling.h", - "torch/nn/functional/upsampling.h", - "torch/nn/functional/vision.h", - "torch/nn/functional/instancenorm.h", - - "torch/nn/module.h", - "torch/nn/modules.h", - "torch/nn/modules/common.h", - - "torch/nn/modules/container/any.h", -// "torch/nn/modules/container/functional.h", - "torch/nn/modules/container/moduledict.h", - "torch/nn/modules/container/modulelist.h", - "torch/nn/modules/container/named_any.h", - "torch/nn/modules/container/sequential.h", - "torch/nn/modules/container/parameterdict.h", - "torch/nn/modules/container/parameterlist.h", - - "torch/nn/modules/adaptive.h", - "torch/nn/modules/batchnorm.h", - "torch/nn/modules/instancenorm.h", - "torch/nn/modules/conv.h", - "torch/nn/modules/dropout.h", - "torch/nn/modules/distance.h", - "torch/nn/modules/embedding.h", - "torch/nn/modules/fold.h", - "torch/nn/modules/linear.h", - "torch/nn/modules/loss.h", - "torch/nn/modules/padding.h", - "torch/nn/modules/pooling.h", - "torch/nn/modules/rnn.h", - "torch/nn/modules/pixelshuffle.h", - "torch/nn/modules/upsampling.h", - "torch/nn/modules/activation.h", - "torch/nn/modules/normalization.h", - "torch/nn/modules/transformerlayer.h", - "torch/nn/modules/transformercoder.h", - "torch/nn/modules/transformer.h", - - "torch/optim.h", - "torch/optim/optimizer.h", - "torch/optim/serialize.h", - "torch/optim/adagrad.h", - "torch/optim/adam.h", - "torch/optim/adamw.h", - "torch/optim/lbfgs.h", - "torch/optim/rmsprop.h", - "torch/optim/sgd.h", - "torch/optim/schedulers/lr_scheduler.h", - "torch/optim/schedulers/step_lr.h", - }, - exclude = { - "ATen/core/UnsafeFromTH.h", - "torch/csrc/jit/api/method.h", + "torch/csrc/jit/serialization/import.h", + + // For inclusion in JNI only, not parsed (compiler needs some complete 
definitions)
+        "torch/csrc/jit/runtime/instruction.h",
+        "torch/csrc/jit/serialization/source_range_serialization.h"
         },
         link = {"c10", "torch_cpu", "torch"},
         preload = {"gomp@.1", "iomp5", "omp", "tbb@.2", "asmjit", "fbgemm"}
     ),
     @Platform(
         value = {"linux", "macosx", "windows"},
-        link = {"c10", "c10_cuda", "nvfuser_codegen", "torch_cpu", "torch_cuda", "torch"},
-        preload = {"gomp@.1", "iomp5", "omp", "tbb@.2", "asmjit", "fbgemm", "cupti@.12"},
+        link = { "c10", "c10_cuda", "torch_cpu", "torch_cuda", "torch" },
+        // If nvfuser_codegen is linked and not preloaded, and javacpp cache is empty, we get:
+        // Loading nvfuser library failed with: Error in dlopen: libtorch.so: Cannot open... (function LoadingNvfuserLibrary)
+        // The warning disappears once the cache is filled. Probably some obscure race condition.
+        preload = {"gomp@.1", "iomp5", "omp", "tbb@.2", "asmjit", "fbgemm", "cupti@.12", "nvfuser_codegen"},
+        includepath = {"/usr/local/cuda/include", "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/include/"},
         preloadpath = {
             "/usr/local/cuda-12.1/lib64/",
             "/usr/local/cuda-12.1/extras/CUPTI/lib64/",
@@ -1783,6 +89,7 @@
             "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/extras/CUPTI/lib64/",
             "C:/Program Files/NVIDIA Corporation/NvToolsExt/bin/x64/",
         },
+        extension = "-gpu"
     ),
 },
@@ -1790,14 +97,44 @@
     global = "org.bytedeco.pytorch.global.torch"
 )
 public class torch implements LoadEnabled, InfoMapper {
-    static { Loader.checkVersion("org.bytedeco", "pytorch"); }
+    static {
+        Loader.checkVersion("org.bytedeco", "pytorch");
+    }
+
+    static void initIncludes(Class thisClass, ClassProperties properties) {
+        // If we are called from Parser, fetch the list of headers to parse from resources.
+        // This check for stack depth 5 also excludes the code path where, because of property inheritance,
+        // we are called from torch class while processing torch_cuda. Parser stack depth is 6 in that code path.
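+        // Rough illustration of the intended effect (a sketch, assuming the usual resource layout):
+        // for this class the resource is named "torch_include.h" and holds one line per header, e.g.
+        //     #include "torch/torch.h"
+        // Each path captured by the regex below is appended to the "platform.include" property, so the
+        // Parser works from that resource list instead of a list hard-coded in this file.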
+        if (Loader.getCallerClass(5).getName().equals("org.bytedeco.javacpp.tools.Parser")) {
+            properties.put("platform.include", new ArrayList());
+            Class presets = properties.getEffectiveClasses().get(0);
+            InputStream includesStream = thisClass.getResourceAsStream(presets.getSimpleName() + "_include.h");
+            if (includesStream == null) {
+                throw new RuntimeException("Cannot find parse list for " + presets);
+            }
+            Pattern re = Pattern.compile("^#include\\s+[\"<]([^\">]+)[\">]");
+            try (BufferedReader br = new BufferedReader(new InputStreamReader(includesStream))) {
+                String line;
+                while ((line = br.readLine()) != null) {
+                    Matcher m = re.matcher(line);
+                    if (m.find())
+                        properties.addAll("platform.include", m.group(1));
+                }
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }

-    @Override public void init(ClassProperties properties) {
+    @Override
+    public void init(ClassProperties properties) {
         String platform = properties.getProperty("platform");
         String extension = properties.getProperty("platform.extension");
         List preloads = properties.get("platform.preload");
         List resources = properties.get("platform.preloadresource");
+        initIncludes(getClass(), properties);
+
         // Only apply this at load time since we don't want to copy the CUDA libraries here
         if (!Loader.isLoadLibraries() || extension == null || !extension.endsWith("-gpu")) {
             return;
@@ -1807,32 +144,28 @@ public class torch implements LoadEnabled, InfoMapper {
             preloads.add(i++, "zlibwapi");
         }
         String[] libs = {"cudart", "cublasLt", "cublas", "cufft", "curand", "cusolver", "nvJitLink", "cusparse", "cudnn", "nccl", "nvrtc", "myelin", "nvinfer",
-                "cudnn_ops_infer", "cudnn_ops_train", "cudnn_adv_infer", "cudnn_adv_train", "cudnn_cnn_infer", "cudnn_cnn_train"};
+                         "cudnn_ops_infer", "cudnn_ops_train", "cudnn_adv_infer", "cudnn_adv_train", "cudnn_cnn_infer", "cudnn_cnn_train"};
         for (String lib : libs) {
             if (platform.startsWith("linux")) {
                 lib += lib.startsWith("cudnn") ? "@.8"
-                        : lib.equals("nccl") ? "@.2"
-                        : lib.equals("myelin") ? "@.1"
-                        : lib.equals("nvinfer") ? "@.8"
-                        : lib.equals("cufft") ? "@.11"
-                        : lib.equals("curand") ? "@.10"
-                        : lib.equals("cusolver") ? "@.11"
-                        : lib.equals("cudart") ? "@.12"
-                        : lib.equals("nvrtc") ? "@.12"
-                        : lib.equals("nvJitLink") ? "@.12"
-                        : "@.12";
+                       : lib.equals("nccl") ? "@.2"
+                       : lib.equals("myelin") ? "@.1"
+                       : lib.equals("nvinfer") ? "@.8"
+                       : lib.equals("cufft") ? "@.11"
+                       : lib.equals("curand") ? "@.10"
+                       : lib.equals("cusolver") ? "@.11"
+                       : "@.12";
             } else if (platform.startsWith("windows")) {
                 lib += lib.startsWith("cudnn") ? "64_8"
-                        : lib.equals("nccl") ? "64_2"
-                        : lib.equals("myelin") ? "64_1"
-                        : lib.equals("nvinfer") ? "64_8"
-                        : lib.equals("cufft") ? "64_11"
-                        : lib.equals("curand") ? "64_10"
-                        : lib.equals("cusolver") ? "64_11"
-                        : lib.equals("cudart") ? "64_12"
-                        : lib.equals("nvrtc") ? "64_120_0"
-                        : lib.equals("nvJitLink") ?
"64_120_0" + : "64_12"; } else { continue; // no CUDA } @@ -1849,773 +182,1053 @@ public class torch implements LoadEnabled, InfoMapper { } public void mapModule(InfoMap infoMap, String name) { - mapModule(infoMap, name, false); + mapModule(infoMap, name, null, null, true); + } + + public void mapModule(InfoMap infoMap, String name, boolean anyModuleCompatible) { + mapModule(infoMap, name, null, null, anyModuleCompatible); } + public void mapModule(InfoMap infoMap, String name, String base) { - mapModule(infoMap, name, base, false); + mapModule(infoMap, name, base, null, true); } + public void mapModule(InfoMap infoMap, String name, String base, String baseBase) { - mapModule(infoMap, name, base, baseBase, false); - } - public void mapModule(InfoMap infoMap, String name, boolean hasDefaultConstructor) { - mapModule(infoMap, name, null, hasDefaultConstructor); + mapModule(infoMap, name, base, baseBase, true); } - public void mapModule(InfoMap infoMap, String name, String base, boolean hasDefaultConstructor) { - mapModule(infoMap, name, base, null, hasDefaultConstructor); - } - public void mapModule(InfoMap infoMap, String name, String base, String baseBase, boolean hasDefaultConstructor) { + + String anyModuleConstructors = ""; + + public void mapModule(InfoMap infoMap, String name, String base, String baseBase, boolean anyModuleCompatible) { if (baseBase != null) { infoMap.put(new Info(baseBase).pointerTypes(name + "ImplBaseBase")); } if (base != null) { - int template = base.indexOf('<'); - int namespace = base.lastIndexOf("::", template); - infoMap.put(new Info(base + base.substring(namespace, template)).annotations("@NoDeallocator")) - .put(new Info(base, base.replace("torch::nn::" + name + "Impl", name + "Impl")).purify(baseBase != null).pointerTypes(name + "ImplBase")); + infoMap.put(new Info(base).pointerTypes(name + "ImplBase")); } - infoMap.put(new Info("torch::nn::" + name + "Impl::" + name + "Impl").annotations("@NoDeallocator")) - .put(new Info("std::shared_ptr").annotations("@SharedPtr") - .valueTypes("@Cast({\"\", \"std::shared_ptr\"}) " + name + "Impl").pointerTypes(name + "Impl")) - .put(new Info("torch::nn::Cloneable", - "torch::nn::Cloneable<" + name + "Impl>").pointerTypes(name + "ImplCloneable")) - .put(new Info("torch::nn::Cloneable::reset").javaText("public native void reset();\n" - + "@Override public Module asModule() { return asModule(this); }\n" - + "@Namespace public static native @Name(\"static_cast\") Module asModule(" + name + "ImplCloneable module);\n")) - .put(new Info("torch::nn::ModuleHolder").pointerTypes(name + "ImplModuleHolder")) - .put(new Info("torch::nn::Module::register_module").javaNames("register_module")); - - if (!hasDefaultConstructor) { - infoMap.put(new Info("torch::nn::ModuleHolder()").skip()); + infoMap.put(new Info("torch::nn::" + name + "Impl")) // Ensure qualified name is in Info when Cloneable inheritance is parsed (and before class XImpl is finished parsing) + .put(new Info("torch::nn::" + name + "Impl::" + name + "Impl").annotations("@SharedPtr")) + .put(new Info("torch::nn::Cloneable").pointerTypes(name + "ImplCloneable").purify()) + .put(new Info("torch::nn::ModuleHolder").skip()) + .put(new Info("torch::nn::" + name).skip()); + + if (anyModuleCompatible) { + anyModuleConstructors += + "public AnyModule(" + name + "Impl module) { super((Pointer)null); allocate(module); }\n" + + // We need a @Cast because AnyModule constructor is explicit + "@SharedPtr private native void allocate(@SharedPtr @Cast({\"\", \"std::shared_ptr\"}) " + 
name + "Impl module);\n"; + infoMap.put(new Info("torch::nn::SequentialImpl::push_back").javaNames("push_back")); } } + public static void sharedMap(InfoMap infoMap) { + infoMap + .put(new Info().enumerate().friendly()) + .put(new Info("auto", "c10::reverse_iterator", "ska::flat_hash_map", /*"std::atomic", */"std::conditional", "std::iterator_traits", + "std::initializer_list", "std::integral_constant", "std::mutex", "std::reverse_iterator", "std::weak_ptr").skip()) + ; + + //// Macros + infoMap + .put(new Info("TORCH_API", "C10_API", "C10_EXPORT", "C10_HIDDEN", "C10_IMPORT", "C10_API_ENUM", "EXPORT_IF_NOT_GCC", + "TORCH_CUDA_CU_API", "TORCH_CUDA_CPP_API", "TORCH_HIP_API", "TORCH_PYTHON_API", + "__ubsan_ignore_float_divide_by_zero__", "__ubsan_ignore_undefined__", "__ubsan_ignore_signed_int_overflow__", "__ubsan_ignore_function__", + "C10_CLANG_DIAGNOSTIC_IGNORE", "C10_CLANG_DIAGNOSTIC_PUSH", "C10_CLANG_DIAGNOSTIC_POP", "C10_ATTR_VISIBILITY_HIDDEN", "C10_ERASE", + "C10_UID", "C10_NODISCARD", "C10_UNUSED", "C10_USED", "C10_RESTRICT", "C10_NOINLINE", "C10_ALWAYS_INLINE", "C10_FALLTHROUGH", + "C10_HOST_DEVICE", "C10_DEVICE", "C10_HOST", "C10_LAUNCH_BOUNDS_0", "C10_HIP_HOST_DEVICE", "C10_WARP_SIZE", "C10_IOS", "C10_MOBILE", + "C10_HOST_CONSTEXPR", "CONSTEXPR_EXCEPT_WIN_CUDA", "C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA", "C10_ALWAYS_INLINE_UNLESS_MOBILE", + "alignas", "COMPLEX_INTEGER_OP_TEMPLATE_CONDITION", "C10_DEVICE_HOST_FUNCTION", "FORCE_INLINE_APPLE", + "ERROR_UNSUPPORTED_CAST", "LEGACY_CONTIGUOUS_MEMORY_FORMAT", "GFLAGS_DLL_DEFINE_FLAG", "GFLAGS_DLL_DECLARE_FLAG", + "AT_X", "DEFINE_KEY", "C10_DISPATCHER_INLINE_UNLESS_MOBILE", "TH_DISALLOW_COPY_AND_ASSIGN", "__device__", + "TORCH_DSA_KERNEL_ARGS", "TORCH_DSA_KERNEL_ARGS_PASS", + "C10_CUDA_API", "C10_CUDA_IMPORT", "C10_CUDA_EXPORT").cppTypes().annotations()) + + .put(new Info("defined(__CUDACC__) || defined(__HIPCC__)", + "defined(__CUDACC__) && !defined(USE_ROCM)", + "defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)", + "defined(_MSC_VER) && _MSC_VER <= 1900", + "defined(NDEBUG)", + "defined(__ANDROID__)", + "defined(__APPLE__)", + "defined(__HIP_PLATFORM_HCC__)", + "defined(_MSC_VER)", "_WIN32", + "defined(USE_ROCM)", "USE_ROCM", "SYCL_LANGUAGE_VERSION", + "defined(CUDA_VERSION) && CUDA_VERSION >= 11000", + "defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE").define(false)) + + .put(new Info("C10_DEFINE_DEPRECATED_USING").cppText("#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy)").cppTypes()) + .put(new Info("C10_DEPRECATED_MESSAGE").cppText("#define C10_DEPRECATED_MESSAGE() deprecated").cppTypes()) + .put(new Info("C10_DEPRECATED").cppText("#define C10_DEPRECATED deprecated").cppTypes()) + .put(new Info("deprecated").annotations("@Deprecated")) + + .put(new Info("CAFFE2_LOG_THRESHOLD").translate(false)) + + .put(new Info("TORCH_CHECK").cppText("#define TORCH_CHECK(cond, ...)").define()) + .put(new Info("DEFINE_SYMBOL").cppText("#define DEFINE_SYMBOL(ns, s) namespace ns { constexpr Symbol s; }").define()) + .put(new Info("TORCH_ENUM_DECLARE").cppText("#define TORCH_ENUM_DECLARE(name) namespace torch { namespace enumtype { struct k##name { k##name() {} }; } }").define()) + ; + } + public void map(InfoMap infoMap) { - infoMap.putFirst(new Info("openblas_config.h", "cblas.h", "lapacke_config.h", "lapacke_mangling.h", "lapack.h", "lapacke.h", "lapacke_utils.h").skip()) - .put(new Info("ordered_dict.h").linePatterns(".*class Item;.*").skip()) - .put(new Info().enumerate()) - .put(new Info().javaText("import org.bytedeco.pytorch.Allocator;")) - 
.put(new Info().javaText("import org.bytedeco.pytorch.Function;")) - .put(new Info().javaText("import org.bytedeco.pytorch.Module;")) - - .put(new Info("basic/containers").cppTypes("c10::optional", "torch::optional", "c10::variant")) - .put(new Info("std::nullptr_t").cast().pointerTypes("PointerPointer")) - .put(new Info("auto", "c10::reverse_iterator", "ska::flat_hash_map", "std::atomic", "std::bitset", "std::conditional", "std::iterator_traits", - "std::initializer_list", "std::integral_constant", "std::mutex", "std::reverse_iterator", "std::weak_ptr").skip()) - .put(new Info("at::CheckedFrom").cast().valueTypes("BytePointer", "String").pointerTypes("PointerPointer")) - .put(new Info("c10::IValue", "at::IValue", "decltype(auto)").pointerTypes("IValue")) - .put(new Info("c10::ScalarType", "at::ScalarType", "torch::Dtype").enumerate().valueTypes("ScalarType").pointerTypes("@Cast(\"c10::ScalarType*\") BytePointer")) - .put(new Info("torch::jit::AttributeKind").enumerate().valueTypes("JitAttributeKind")) - .put(new Info("torch::jit::PickleOpCode").enumerate().translate(false).valueTypes("PickleOpCode")) - .put(new Info("std::size_t", "c10::Dict::size_type", - "c10::Dict::size_type").cast().valueTypes("long").pointerTypes("SizeTPointer")) - .put(new Info("std::tuple", "std::tuple", - "torch::ExpandingArray<1>", "torch::ExpandingArray<2>", "torch::ExpandingArray<3>", "torch::ExpandingArray<4>", - "torch::ExpandingArray", "torch::ExpandingArray<1*2>", "torch::ExpandingArray<2*2>", "torch::ExpandingArray<3*2>").cast().pointerTypes("LongPointer")) - .put(new Info("torch::ExpandingArray<1,double>", "torch::ExpandingArray<2,double>", "torch::ExpandingArray<3,double>").cast().pointerTypes("DoublePointer")) - .put(new Info("torch::ExpandingArrayWithOptionalElem<2>", "torch::ExpandingArrayWithOptionalElem<3>").cast().pointerTypes("LongOptional")) - .put(new Info("std::array", "std::array", "std::array").cast().pointerTypes("BoolPointer")) - .put(new Info("std::pair").pointerTypes("EnumNameValue").define()) - .put(new Info("c10::ClassType::Property").pointerTypes("ClassType.Property")) - .put(new Info("c10::optional").pointerTypes("BoolOptional").define()) - .put(new Info("c10::optional").pointerTypes("ByteOptional").define()) - .put(new Info("c10::optional", "c10::optional").pointerTypes("IntOptional").define()) - .put(new Info("c10::optional").pointerTypes("LongOptional").define()) - .put(new Info("c10::optional").pointerTypes("DoubleOptional").define()) - .put(new Info("c10::optional").pointerTypes("SizeTOptional").define()) - .put(new Info("c10::optional").pointerTypes("StringOptional").define()) - .put(new Info("c10::optional >").pointerTypes("BoolVectorOptional").define()) - .put(new Info("c10::optional >").pointerTypes("LongVectorOptional").define()) - .put(new Info("c10::optional >").pointerTypes("DoubleVectorOptional").define()) - .put(new Info("c10::optional >").pointerTypes("SizeTVectorOptional").define()) - .put(new Info("c10::optional >").pointerTypes("StringVectorOptional").define()) - .put(new Info("c10::optional >").pointerTypes("StrideVectorOptional").define()) - .put(new Info("c10::optional >").pointerTypes("ShapeSymbolVectorOptional").define()) - .put(new Info("c10::optional >").pointerTypes("TensorVectorOptional").define()) - .put(new Info("c10::optional", "c10::optional", "c10::optional").pointerTypes("DeviceOptional").define()) - .put(new Info("c10::optional >", "c10::optional", "c10::optional", - "c10::OptionalArrayRef", "c10::OptionalIntArrayRef", "at::OptionalIntArrayRef") - 
.pointerTypes("LongArrayRefOptional", "@Cast({\"int64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long...").define()) - .put(new Info("c10::optional >", "c10::optional >", - "c10::OptionalArrayRef").pointerTypes("DoubleArrayRefOptional").define()) - .put(new Info("c10::optional >", "c10::optional >", - "c10::OptionalArrayRef", "c10::OptionalSymIntArrayRef", "at::OptionalSymIntArrayRef").pointerTypes("SymIntArrayRefOptional").define()) - .put(new Info("c10::optional", "c10::optional").pointerTypes("LayoutOptional").define()) - .put(new Info("c10::optional", "c10::optional").pointerTypes("MemoryFormatOptional").define()) - .put(new Info("c10::optional", "c10::optional").pointerTypes("ScalarOptional").define()) - .put(new Info("c10::optional", "c10::optional", "c10::optional").pointerTypes("ScalarTypeOptional").define()) - .put(new Info("c10::optional").pointerTypes("AliasInfoOptional").define()) - .put(new Info("c10::optional").pointerTypes("IValueOptional").define()) - .put(new Info("c10::optional").pointerTypes("CppSignatureOptional").define()) - .put(new Info("c10::optional").pointerTypes("DispatchKeyOptional").define()) - .put(new Info("c10::optional").pointerTypes("OperatorHandleOptional").define()) - .put(new Info("c10::optional").pointerTypes("OperatorNameOptional").define()) - .put(new Info("c10::optional").pointerTypes("QualifiedNameOptional").define()) - .put(new Info("c10::optional").pointerTypes("StreamOptional").define()) - .put(new Info("c10::optional").pointerTypes("StrideOptional").define()) - .put(new Info("c10::optional").pointerTypes("TypePtrOptional").define()) - .put(new Info("c10::optional").pointerTypes("ClassTypePropertyOptional").define()) - .put(new Info("c10::optional").pointerTypes("AliasTypeSetOptional").define()) - .put(new Info("c10::optional").pointerTypes("FunctionSchemaOptional").define()) - .put(new Info("c10::optional", "c10::optional").pointerTypes("SymDimVectorOptional").define()) - .put(new Info("c10::optional").pointerTypes("SymIntOptional").define()) - .put(new Info("c10::optional").pointerTypes("SymIntArrayRefOptional").define()) - .put(new Info("c10::optional").pointerTypes("IValueOptional").define()) - .put(new Info("c10::optional").pointerTypes("DimVectorOptional").define()) - .put(new Info("c10::optional").pointerTypes("DimnameOptional").define()) - .put(new Info("c10::optional").pointerTypes("DimnameListOptional").define()) - .put(new Info("c10::optional").pointerTypes("GeneratorOptional").define()) - .put(new Info("c10::optional", "c10::optional", "c10::optional").pointerTypes("TensorOptional").define()) - .put(new Info("c10::optional").pointerTypes("TensorListOptional").define()) - .put(new Info("c10::optional").pointerTypes("ThreadLocalStateOptional").define()) - .put(new Info("c10::optional").pointerTypes("TypeMetaOptional").define()) - .put(new Info("c10::optional").pointerTypes("ExecutorExecutionModeOptional").define()) - .put(new Info("c10::optional", - "c10::optional").cast().pointerTypes("InlinedCallStackOptional").define()) - .put(new Info("c10::optional", - "c10::optional").cast().pointerTypes("ScopeOptional").define()) - .put(new Info("c10::optional").pointerTypes("ModuleInstanceInfoOptional").define()) - .put(new Info("c10::optional").pointerTypes("SourceRangeOptional").define()) - .put(new Info("c10::optional").pointerTypes("MethodOptional").define()) - .put(new Info("c10::optional").pointerTypes("OperatorOptional").define()) - .put(new Info("c10::optional", "c10::optional").pointerTypes("NamedValueOptional").define()) - 
.put(new Info("c10::optional").pointerTypes("ValueOptional").define()) - .put(new Info("c10::optional >", - "c10::optional >", - "c10::optional >").cast().pointerTypes("LongExpandingArrayOptional").define()) - .put(new Info("c10::optional >", - "c10::optional >", - "c10::optional >", - "c10::optional::ExpandingArrayDouble>", - "c10::optional::ExpandingArrayDouble>", - "c10::optional::ExpandingArrayDouble>").cast().pointerTypes("DoubleExpandingArrayOptional").define()) - .put(new Info("c10::optional >").pointerTypes("StringSizeTSizeTTupleOptional").define()) - .put(new Info("torch::optional >").pointerTypes("TensorTensorOptional").define()) - - .put(new Info("c10::Type::SingletonOrSharedTypePtr", "c10::TypePtr", "c10::Type::TypePtr").pointerTypes("Type.TypePtr").define()) - .put(new Info("c10::Type::SingletonOrSharedTypePtr(c10::SingletonTypePtr)", - "c10::ComplexType::get", "c10::FloatType::get", "c10::IntType::get").skip()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("SingletonTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("AnyTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("AnyEnumTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("NumberTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("FloatTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("ComplexTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("IntTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("BoolTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("StringTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("StorageTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("NoneTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("GeneratorTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("QuantizerTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("QSchemeTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("DeviceObjTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("StreamObjTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("CapsuleTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("PyObjectTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("LayoutTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("ScalarTypeTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("AnyListTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("AnyTupleTypePtr").define()) - .put(new Info("c10::SingletonTypePtr").pointerTypes("AnyClassTypePtr").define()) - - .put(new Info("c10::variant", - "torch::nn::init::NonlinearityType").pointerTypes("NonlinearityType").define()) - .put(new Info("c10::variant", - "torch::nn::init::FanModeType").pointerTypes("FanModeType").define()) - - .put(new Info("c10::variant", - "torch::nn::ConvOptions<1>::padding_mode_t", - "torch::nn::ConvOptions<2>::padding_mode_t", - "torch::nn::ConvOptions<3>::padding_mode_t", - "torch::nn::ConvTransposeOptions<1>::padding_mode_t", - "torch::nn::ConvTransposeOptions<2>::padding_mode_t", - "torch::nn::ConvTransposeOptions<3>::padding_mode_t", - "torch::nn::detail::conv_padding_mode_t").pointerTypes("conv_padding_mode_t").define()) - .put(new Info("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>", 
- "torch::nn::ConvOptions<1>::padding_t", - "torch::nn::detail::ConvNdOptions<1>::padding_t", - "torch::nn::functional::ConvFuncOptions<1>::padding_t", - "torch::nn::functional::Conv1dFuncOptions::padding_t").purify().pointerTypes("conv_padding_t1").define()) - .put(new Info("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>", - "torch::nn::ConvOptions<2>::padding_t", - "torch::nn::detail::ConvNdOptions<2>::padding_t", - "torch::nn::functional::ConvFuncOptions<2>::padding_t", - "torch::nn::functional::Conv2dFuncOptions::padding_t").purify().pointerTypes("conv_padding_t2").define()) - .put(new Info("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>", - "torch::nn::ConvOptions<3>::padding_t", - "torch::nn::detail::ConvNdOptions<3>::padding_t", - "torch::nn::functional::ConvFuncOptions<3>::padding_t", - "torch::nn::functional::Conv3dFuncOptions::padding_t").purify().pointerTypes("conv_padding_t3").define()) - - .put(new Info("c10::variant", - "torch::nn::EmbeddingBagMode").pointerTypes("EmbeddingBagMode").define()) - .put(new Info("c10::variant", - "torch::nn::functional::PadFuncOptions::mode_t").pointerTypes("pad_mode_t").define()) - - .put(new Info("c10::variant", - "torch::nn::L1LossOptions::reduction_t", "torch::nn::functional::L1LossFuncOptions::reduction_t", - "torch::nn::MSELossOptions::reduction_t", "torch::nn::functional::MSELossFuncOptions::reduction_t", - "torch::nn::BCELossOptions::reduction_t", "torch::nn::functional::BinaryCrossEntropyFuncOptions::reduction_t", - "torch::nn::HingeEmbeddingLossOptions::reduction_t", "torch::nn::functional::HingeEmbeddingLossFuncOptions::reduction_t", - "torch::nn::MultiMarginLossOptions::reduction_t", "torch::nn::functional::MultiMarginLossFuncOptions::reduction_t", - "torch::nn::CosineEmbeddingLossOptions::reduction_t", "torch::nn::functional::CosineEmbeddingLossFuncOptions::reduction_t", - "torch::nn::MultiLabelMarginLossOptions::reduction_t", "torch::nn::functional::MultilabelMarginLossFuncOptions::reduction_t", - "torch::nn::SoftMarginLossOptions::reduction_t", "torch::nn::functional::SoftMarginLossFuncOptions::reduction_t", - "torch::nn::MultiLabelSoftMarginLossOptions::reduction_t", "torch::nn::functional::MultilabelSoftMarginLossFuncOptions::reduction_t", - "torch::nn::TripletMarginLossOptions::reduction_t", "torch::nn::functional::TripletMarginLossFuncOptions::reduction_t", - "torch::nn::TripletMarginWithDistanceLossOptions::reduction_t", "torch::nn::functional::TripletMarginWithDistanceLossFuncOptions::reduction_t", - "torch::nn::CTCLossOptions::reduction_t", "torch::nn::functional::CTCLossFuncOptions::reduction_t", - "torch::nn::SmoothL1LossOptions::reduction_t", "torch::nn::functional::SmoothL1LossFuncOptions::reduction_t", - "torch::nn::HuberLossOptions::reduction_t", "torch::nn::functional::HuberLossFuncOptions::reduction_t", - "torch::nn::PoissonNLLLossOptions::reduction_t", "torch::nn::functional::PoissonNLLLossFuncOptions::reduction_t", - "torch::nn::MarginRankingLossOptions::reduction_t", "torch::nn::functional::MarginRankingLossFuncOptions::reduction_t", - "torch::nn::NLLLossOptions::reduction_t", "torch::nn::functional::NLLLossFuncOptions::reduction_t", - "torch::nn::CrossEntropyLossOptions::reduction_t", "torch::nn::functional::CrossEntropyFuncOptions::reduction_t", - "torch::nn::BCEWithLogitsLossOptions::reduction_t", "torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions::reduction_t").pointerTypes("loss_reduction_t").define()) - .put(new Info("c10::variant", - 
"torch::nn::KLDivLossOptions::reduction_t", "torch::nn::functional::KLDivFuncOptions::reduction_t").pointerTypes("kldiv_loss_reduction_t").define()) - - .put(new Info("c10::variant", - "torch::nn::functional::GridSampleFuncOptions::mode_t").pointerTypes("grid_sample_mode_t").define()) - .put(new Info("c10::variant", - "torch::nn::functional::GridSampleFuncOptions::padding_mode_t").pointerTypes("grid_sample_padding_mode_t").define()) - - .put(new Info("c10::variant", - "torch::nn::detail::RNNOptionsBase::rnn_options_base_mode_t").pointerTypes("rnn_options_base_mode_t").define()) - .put(new Info("c10::variant", - "torch::nn::RNNOptions::nonlinearity_t", "torch::nn::RNNCellOptions::nonlinearity_t").pointerTypes("rnn_nonlinearity_t").define()) - - .put(new Info("c10::variant", - "torch::nn::UpsampleOptions::mode_t").pointerTypes("upsample_mode_t").define()) - .put(new Info("c10::variant", - "torch::nn::functional::InterpolateFuncOptions::mode_t").pointerTypes("interpolate_mode_t").define()) - - .put(new Info("c10::variant >", - "torch::nn::TransformerEncoderLayerOptions::activation_t", - "torch::nn::TransformerDecoderLayerOptions::activation_t", - "torch::nn::TransformerOptions::activation_t").pointerTypes("transformer_activation_t").define()) - - .put(new Info("std::vector >").pointerTypes("Bool2Vector").define()) - .put(new Info("std::vector").pointerTypes("BoolVector").define()) - .put(new Info("std::vector").pointerTypes("BytePointerVector").define()) - .put(new Info("std::vector", "std::tuple,std::vector >").cast().pointerTypes("LongVector").define()) - .put(new Info("std::vector").cast().pointerTypes("DoubleVector").define()) - .put(new Info("std::vector").cast().pointerTypes("SizeTVector").define()) - .put(new Info("std::vector").pointerTypes("StringVector").define()) - .put(new Info("std::vector >").pointerTypes("StringLongVector").define()) - .put(new Info("const std::vector >", - "std::vector >").pointerTypes("RecordFunctionCallbackHandleVector").define()) - .put(new Info("std::vector").pointerTypes("ArgumentVector").define()) - .put(new Info("std::vector", "torch::jit::Stack").pointerTypes("IValueVector").define()) - .put(new Info("std::vector", "std::vector").pointerTypes("QEngineVector").define()) - .put(new Info("std::vector").pointerTypes("ScalarTypeVector").define()) - .put(new Info("std::vector").pointerTypes("SymbolVector").define()) - .put(new Info("std::vector").pointerTypes("SymIntVector").define()) - .put(new Info("std::vector >").pointerTypes("LongOptionalVector").define()) - .put(new Info("std::vector >").pointerTypes("IValueOptionalVector").define()) - .put(new Info("c10::Dict").purify().pointerTypes("StringGenericListDict").define()) - .put(new Info("c10::Dict").purify().pointerTypes("GenericDict").define()) - .put(new Info("c10::impl::DictIterator", - "c10::Dict::iterator").purify().pointerTypes("GenericDictIterator").define()) - .put(new Info("c10::impl::DictEntryRef").pointerTypes("GenericDictEntryRef").define()) - .put(new Info("std::map").pointerTypes("StringStringMap").define()) - .put(new Info("std::map").pointerTypes("StringIntMap").define()) - .put(new Info("std::map").pointerTypes("StringLongMap").define()) - .put(new Info("std::map").pointerTypes("StringTensorMap").define()) - .put(new Info("std::unordered_set").pointerTypes("StringSet").define()) - .put(new Info("std::unordered_set").pointerTypes("HashAliasedIValues").define()) - .put(new Info("std::unordered_set").pointerTypes("SymbolSet").define()) - .put(new 
Info("std::unordered_set").pointerTypes("TensorImplSet").define()) - .put(new Info("std::unordered_set >").pointerTypes("RecordScopeSet").define()) -// .put(new Info("std::unordered_set").pointerTypes("NodeSet").define()) -// .put(new Info("std::unordered_map").pointerTypes("NodeIntMap").define()) -// .put(new Info("std::unordered_map").pointerTypes("NodeInputBufferfoMap").define()) -// .put(new Info("std::unordered_map").pointerTypes("NodeExecInfoMap").define()) - .put(new Info("std::unordered_map").pointerTypes("HashAliasedIValueMap").define()) - .put(new Info("std::unordered_map").pointerTypes("LongStringMap").define()) - .put(new Info("std::unordered_map").pointerTypes("StringBoolMap").define()) - .put(new Info("std::unordered_map").pointerTypes("StringSizeTMap").define()) - .put(new Info("std::unordered_map").pointerTypes("ExtraFilesMap").define()) - .put(new Info("std::unordered_map").pointerTypes("TypeEnv").define()) - .put(new Info("std::unordered_map", "std::unordered_map").pointerTypes("StringIValueMap").define()) - .put(new Info("std::unordered_map >").pointerTypes("StringFunctionMap").define()) - .put(new Info("std::unordered_map").pointerTypes("StringValueMap").define()) - .put(new Info("std::unordered_map >").pointerTypes("StringLongStringMapMap").define()) - .put(new Info("std::unordered_map").pointerTypes("ArgumentSpecExecutionPlanMap").define()) - .put(new Info("std::unordered_map").pointerTypes("ValueValueMap").define()) - .put(new Info("std::vector >", "std::vector").pointerTypes("ClassTypeVector").define()) - .put(new Info("std::vector >", "std::vector", - "std::vector", "c10::AliasTypeSet").pointerTypes("TypeVector").define()) - .put(new Info("const std::vector", "std::vector").valueTypes("@StdMove DimnameVector").pointerTypes("DimnameVector").define()) - .put(new Info("std::vector").pointerTypes("StrideVector").define()) - .put(new Info("std::vector").pointerTypes("ShapeSymbolVector").define()) - .put(new Info("std::vector").pointerTypes("TensorImplVector").define()) - .put(new Info("std::vector", "torch::autograd::edge_list") - .valueTypes("@Cast({\"\", \"std::vector\"}) @StdMove EdgeVector").pointerTypes("EdgeVector").define()) - .put(new Info("std::vector", "std::vector", "std::vector", "torch::autograd::variable_list") - .valueTypes("@Cast({\"\", \"std::vector\"}) @StdMove TensorVector").pointerTypes("TensorVector").define()) - .put(new Info("std::vector", "std::vector").pointerTypes("TensorIndexVector").define()) - .put(new Info("std::vector >").pointerTypes("TensorOptionalVector").define()) - .put(new Info("std::vector >").pointerTypes("OperatorOptionalVector").define()) - .put(new Info("std::vector >").pointerTypes("FunctionPreVector").define()) - .put(new Info("const std::vector >", - "std::vector >").pointerTypes("FunctionPreHookVector").define()) - .put(new Info("const std::vector >", - "std::vector >").pointerTypes("FunctionPostHookVector").define()) - .put(new Info("const std::vector >", - "std::vector >").pointerTypes("TokenTrieVector").define()) - .put(new Info("const std::vector", "std::vector").pointerTypes("SavedVariableVector").define()) - .put(new Info("const std::vector", "std::vector").pointerTypes("DefVector").define()) - .put(new Info("const std::vector", "std::vector").pointerTypes("PropertyVector").define()) - .put(new Info("const std::vector", "std::vector").pointerTypes("InstructionVector").define()) - .put(new Info("const std::vector", "std::vector").pointerTypes("CompilationUnitVector").define()) - .put(new Info("const std::vector", 
"std::vector").pointerTypes("OptimizerParamGroupVector").define()) - .put(new Info("std::vector").pointerTypes("FunctionVector").define()) - .put(new Info("std::vector >").pointerTypes("GraphVector").define()) - .put(new Info("std::vector >").pointerTypes("OperatorVector").define()) - .put(new Info("std::vector >", "std::vector").pointerTypes("ResolverVector").define()) - .put(new Info("std::vector >", "std::vector").pointerTypes("SugaredValueVector").define()) - .put(new Info("std::vector").pointerTypes("StackEntryVector").define()) - .put(new Info("std::vector").pointerTypes("BlockVector").define()) - .put(new Info("std::vector", "std::vector").pointerTypes("ValueVector").define()) - .put(new Info("std::vector").pointerTypes("JitNodeVector").define()) - .put(new Info("std::deque").pointerTypes("TensorDeque").define()) - .put(new Info("std::tuple").pointerTypes("TensorTuple").define()) - .put(new Info("std::tuple").pointerTypes("TensorTensorTuple").define()) - .put(new Info("std::tuple").pointerTypes("TensorTensorTensorTuple").define()) - .put(new Info("std::tuple").pointerTypes("TensorTensorTensorTensorTuple").define()) - .put(new Info("std::tuple").pointerTypes("TensorTensorTensorTensorTensorTuple").define()) - .put(new Info("std::tuple >").pointerTypes("TensorTensorTensorTensorVectorTuple").define()) - .put(new Info("std::tuple").pointerTypes("TensorTensorTensorTensorLongTuple").define()) - .put(new Info("std::tuple").pointerTypes("TensorTensorLongLongTensorTuple").define()) - .put(new Info("std::tuple").pointerTypes("TensorTensorTensorTensorTensorTensorTuple").define()) - .put(new Info("std::tuple").pointerTypes("TensorTensorTensorTensorTensorTensorTensorTuple").define()) - .put(new Info("std::tuple").pointerTypes("TensorTensorDoubleLongTuple").define()) - .put(new Info("std::tuple >").pointerTypes("TensorTensorTensorTupleTuple").define()) - .put(new Info("std::tuple,c10::MaybeOwned >") - .pointerTypes("TensorMaybeOwnedTensorMaybeOwnedTuple").define()) - .put(new Info("std::tuple,c10::MaybeOwned,c10::MaybeOwned >") - .pointerTypes("TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwnedTuple").define()) - .put(new Info("std::tuple").purify().pointerTypes("PackedSequenceTensorTuple").define()) - .put(new Info("std::tuple >").purify().pointerTypes("PackedSequenceTensorTensorTupleTuple").define()) - .put(new Info("std::tuple", "std::tuple", - "std::tuple", - "std::tuple", - "std::tuple", - "std::tuple").cast().pointerTypes("PointerPointer")) - .put(new Info("std::tuple").pointerTypes("StringSizeTSizeTTuple").define()) - .put(new Info("std::tuple >").pointerTypes("TensorTensorVectorTuple").define()) - .put(new Info("std::tuple,at::Tensor>").pointerTypes("TensorVectorTensorTuple").define()) - .put(new Info("std::tuple,std::vector,std::vector,std::vector,std::vector >") - .pointerTypes("TensorVectorTensorVectorTensorVectorTensorVectorTensorVectorTuple").define()) - .put(new Info("std::tuple,std::vector >").pointerTypes("TensorTensorVectorTensorVectorTuple").define()) - .put(new Info("torch::OrderedDict", "torch::OrderedDict").pointerTypes("StringTensorDict").define()) - .put(new Info("torch::OrderedDict::Item", "torch::OrderedDict::Item", - "std::vector::Item>::iterator").pointerTypes("StringTensorDictItem")) - .put(new Info("torch::OrderedDict").pointerTypes("StringModuleDict").define()) - .put(new Info("torch::OrderedDict::Item", "torch::OrderedDict::Item", - "std::vector::Item>::iterator").pointerTypes("StringModuleDictItem")) - .put(new Info("torch::OrderedDict") - .valueTypes("@Cast({\"\", 
\"torch::OrderedDict&&\"}) @StdMove StringAnyModuleDict").pointerTypes("StringAnyModuleDict").define()) - .put(new Info("torch::OrderedDict::Item", "torch::OrderedDict::Item", - "std::vector::Item>::iterator").pointerTypes("StringAnyModuleDictItem")) - .put(new Info("torch::OrderedDict >").pointerTypes("StringSharedModuleDict").define()) - .put(new Info("torch::OrderedDict::Item >", "torch::OrderedDict >::Item", - "std::vector >::Item>::iterator").pointerTypes("StringSharedModuleDictItem")) - .put(new Info("std::pair", "std::pair", "torch::OrderedDict::Item", - "std::vector::Item>::iterator").cast().pointerTypes("StringTensorPair").define()) - .put(new Info("std::pair").pointerTypes("StringModulePair").define()) - .put(new Info("std::pair").pointerTypes("StringAnyModulePair").define()) - .put(new Info("std::pair >").pointerTypes("StringSharedModulePair").define()) - .put(new Info("std::vector").pointerTypes("ModuleVector").define()) - .put(new Info("std::vector::iterator").pointerTypes("ModuleVector.Iterator")) - .put(new Info("std::vector").pointerTypes("AnyModuleVector").define()) - .put(new Info("std::vector::iterator").pointerTypes("AnyModuleVector.Iterator")) - .put(new Info("std::vector >").pointerTypes("SharedModuleVector").define()) - .put(new Info("std::vector >::iterator").pointerTypes("SharedModuleVector.Iterator")) - .put(new Info("std::vector >").pointerTypes("SharedAnyModuleVector").define()) - .put(new Info("std::vector >::iterator").pointerTypes("SharedAnyModuleVector.Iterator")) - .put(new Info("std::vector >").pointerTypes("StringTensorPairVector").define()) - .put(new Info("std::vector >").pointerTypes("StringModulePairVector").define()) - .put(new Info("std::vector >").pointerTypes("StringAnyModulePairVector").define()) - .put(new Info("std::vector > >").pointerTypes("StringSharedModulePairVector").define()) - .put(new Info("std::vector >", "torch::jit::FusionStrategy").pointerTypes("FusionStrategy").define()) - - .put(new Info("C10_EXPORT", "C10_HIDDEN", "C10_IMPORT", "C10_API", "C10_API_ENUM", "EXPORT_IF_NOT_GCC", - "TORCH_API", "TORCH_CUDA_CU_API", "TORCH_CUDA_CPP_API", "TORCH_HIP_API", "TORCH_PYTHON_API", - "__ubsan_ignore_float_divide_by_zero__", "__ubsan_ignore_undefined__", "__ubsan_ignore_signed_int_overflow__", "__ubsan_ignore_function__", - "C10_CLANG_DIAGNOSTIC_IGNORE", "C10_CLANG_DIAGNOSTIC_PUSH", "C10_CLANG_DIAGNOSTIC_POP", "C10_ATTR_VISIBILITY_HIDDEN", "C10_ERASE", - "C10_UID", "C10_NODISCARD", "C10_UNUSED", "C10_USED", "C10_RESTRICT", "C10_NOINLINE", "C10_ALWAYS_INLINE", "C10_FALLTHROUGH", - "C10_HOST_DEVICE", "C10_DEVICE", "C10_HOST", "C10_LAUNCH_BOUNDS_0", "C10_HIP_HOST_DEVICE", "C10_WARP_SIZE", "C10_IOS", "C10_MOBILE", - "C10_HOST_CONSTEXPR", "CONSTEXPR_EXCEPT_WIN_CUDA", "C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA", "C10_ALWAYS_INLINE_UNLESS_MOBILE", - "alignas", "COMPLEX_INTEGER_OP_TEMPLATE_CONDITION", "C10_DEVICE_HOST_FUNCTION", "FORCE_INLINE_APPLE", - "ERROR_UNSUPPORTED_CAST", "LEGACY_CONTIGUOUS_MEMORY_FORMAT", "GFLAGS_DLL_DEFINE_FLAG", "GFLAGS_DLL_DECLARE_FLAG", - "AT_X", "DEFINE_KEY", "C10_DISPATCHER_INLINE_UNLESS_MOBILE", "TH_DISALLOW_COPY_AND_ASSIGN").cppTypes().annotations()) - - .put(new Info("defined(__CUDACC__) || defined(__HIPCC__)", - "defined(__CUDACC__) && !defined(USE_ROCM)", - "defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)", - "defined(_MSC_VER) && _MSC_VER <= 1900", - "defined(NDEBUG)", - "defined(__ANDROID__)", - "defined(__APPLE__)", - "defined(__HIP_PLATFORM_HCC__)", - "defined(_MSC_VER)", "_WIN32", - "defined(USE_ROCM)", "USE_ROCM", 
"SYCL_LANGUAGE_VERSION", - "defined(CUDA_VERSION) && CUDA_VERSION >= 11000", - "defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE").define(false)) - - .put(new Info("C10_DEFINE_DEPRECATED_USING").cppText("#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy)").cppTypes()) - .put(new Info("C10_DEPRECATED_MESSAGE").cppText("#define C10_DEPRECATED_MESSAGE() deprecated").cppTypes()) - .put(new Info("C10_DEPRECATED").cppText("#define C10_DEPRECATED deprecated").cppTypes()) - .put(new Info("deprecated").annotations("@Deprecated")) - - .put(new Info("CAFFE2_LOG_THRESHOLD").translate(false)) - - .put(new Info("TORCH_CHECK").cppText("#define TORCH_CHECK(cond, ...)").define()) - .put(new Info("DEFINE_SYMBOL").cppText("#define DEFINE_SYMBOL(ns, s) namespace ns { constexpr Symbol s; }").define()) - .put(new Info("TORCH_ENUM_DECLARE").cppText("#define TORCH_ENUM_DECLARE(name) namespace torch { namespace enumtype { struct k##name { k##name() {} }; } }").define()) - - .put(new Info("c10::Error", "c10::IndexError", "c10::LinAlgError", "c10::ValueError", "c10::TypeError", "c10::NotImplementedError", "c10::EnforceFiniteError", "c10::OutOfMemoryError", - "c10::OnnxfiBackendSystemError", "c10::Capsule", "c10::ClassType", "c10::DistBackendError", "c10::EnumType", "c10::OperatorNameView", "c10::SharedType", "c10::StrongTypePtr", - "c10::WeakTypePtr", "c10::NamedType", "torch::autograd::CppFunctionPreHook", "torch::autograd::DifferentiableViewMeta", "torch::autograd::Node", - "torch::autograd::NodeGuard", "torch::autograd::TraceableFunction", "torch::jit::Instruction", "torch::jit::Method", "torch::jit::ModuleInstanceInfo", - "torch::jit::Object::Property", "torch::jit::Operator", "torch::jit::OperatorSet", "torch::jit::SourceRangePickler", "torch::jit::Suspend", "torch::jit::Unpickler").purify()) - - .put(new Info("c10::intrusive_ptr", "c10::weak_intrusive_ptr", "c10::guts::is_fundamental", "c10::operator !=", "c10::operator ==", "c10::operator <<", - "c10::detail::CaptureKernelCall", "c10::detail::DictImpl", "c10::detail::MultiDispatchKeySet", "c10::ExclusivelyOwnedTraits", "c10::FunctionSchema::dump", - "c10::domain_prefix", "c10::C10FlagsRegistry", "c10::enforce_detail::EnforceFailMessage", "c10::impl::build_feature_required_feature_not_available", - "c10::detail::getMaybeFakeTypePtr_", "c10::complex_literals::operator \"\"_if", "c10::complex_literals::operator \"\"_id", "c10::complex", - "decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::ComplexHalf>::t)", "c10::BoxedKernel", "c10::ExtraMeta", "c10::remove_symint", - "c10::InefficientStdFunctionContext", "c10::DataPtr::move_context", "QuantizerPtr", "c10::IValue::toModule", "c10::toBackendComponent", - "c10::List >", "c10::optional", "c10::asIntArrayRefSlow", "c10::standardizeVectorForUnion", - "c10::impl::ExcludeDispatchKeyGuard", "c10::impl::ScalarTypeToCPPType", "c10::impl::AnnotatedKernel", "c10::impl::OperatorEntry", - "c10::StorageImpl(c10::StorageImpl)", "c10::StorageImpl::operator =", - "c10::TensorImpl(c10::TensorImpl)", "c10::TensorImpl::operator =", - "caffe2::Blob(caffe2::Blob)", "caffe2::Blob::operator =", - "torch::serialize::InputArchive(torch::serialize::InputArchive)", "torch::serialize::InputArchive::operator =", - "torch::serialize::OutputArchive(torch::serialize::OutputArchive)", "torch::serialize::OutputArchive::operator =", - "at::_test_serialization_subcmul", "at::_test_optional_intlist", "at::_test_optional_filled_intlist", - "at::_test_optional_floatlist", "at::_test_string_default", "at::_test_ambiguous_defaults", - 
"at::TensorBase::expect_contiguous", "at::Tensor::print", "at::borrow_from_optional_tensor", - "at::MaterializedITensorListRef", "at::impl::check_names_valid_for", "at::internal::launch_no_thread_state", - "at::checkSameNumel", "at::check_names_valid_for", "at::default_names", "at::get_device", "at::detail::scalar_fill", - "at::namedinference::compute_diagonal_outnames", "at::Tensor::packed_accessor", "torch::optim::serialize", "torch::none_of", - "torch::CountTensors", "torch::CountVariables", "torch::autograd::ExtractVariables", "torch::autograd::detail::MakeNextFunctionList", - "torch::autograd::AutogradMeta::hooks_", "torch::autograd::AutogradMeta::cpp_hooks_list_", - "torch::autograd::VariableType::unpack", "torch::autograd::VariableType::unpack_opt", "torch::jit::parseSchemaOrName", - "torch::jit::trace", "torch::jit::tracer::TracingState::lookup_var_name_fn", "torch::jit::tracer::ArgumentStash", - "torch::jit::constant_not_supported_error", "torch::jit::ObjectAttributeError", "torch::jit::utils::get_module_info", - "torch::jit::operator <<(std::ostream&, torch::jit::Instruction)", "torch::jit::toString(torch::jit::OpCode)", - "torch::jit::PropertyPropBase::processLoop", "torch::jit::PropertyPropBase::processIf", "torch::jit::PropertyPropBase::propagateBlock", - "torch::jit::getMobileInterfaceCallExport", "torch::jit::OperatorSet::getOps", "torch::jit::SourceView::findSourceRangeThatGenerated", - "at::namedinference::propagate_names_if_present_and_nonempty", "torch::jit::_load_jit_module_from_flatbuffer_bytes", "torch::jit::_save_jit_module_to", - - "torch::jit::checkHasValidSetGetState", "torch::jit::getTypeTags", "torch::jit::setTypeTags", "torch::jit::getStorageKey", - "torch::jit::getUnresolvedClassAttributes", "torch::jit::isOpSupportedInMobile", "torch::jit::restoreAccurateTypeTags", - "torch::jit::detail::getDifferentiableGraphOpExecutor","torch::jit::detail::getGradExecutor", "torch::jit::Graph::createPythonOp", - "torch::jit::Graph::createDifferentiableSubgraph", "torch::jit::NamedValue::type", "torch::jit::ProfileOp", "torch::jit::Value::isValidName", - "torch::jit::EqualType::operator ()", "torch::jit::HashType::operator ()", "torch::jit::InterpreterContinuation::operator ()", - "torch::jit::Object(c10::QualifiedName, torch::jit::CompilationUnit*, bool)", "torch::jit::Source::findSourceRangeThatGenerated", - "torch::jit::SourceRangeDeserializer::deserialize", "torch::jit::SourceRangePickler::pickle", "torch::jit::Pickler::pushEmptyDict", - "torch::jit::PrintDepsTable::add", "torch::jit::printerHasSpecialCaseFor", "ONNX_NAMESPACE::ModelProto", "torch::jit::export_onnx", - "torch::jit::Function::call", "torch::jit::GraphFunction::call", "torch::jit::GraphFunction::function_creator", "torch::jit::getOptionsFromGlobal", - "torch::jit::serialize_model_proto_to_string", "torch::onnx::IR_VERSION", "torch::onnx::PRODUCER_VERSION").skip()) - - .put(new Info("c10::requires_grad", "at::range", "at::bernoulli_out", "at::normal_out", "at::stft").skipDefaults()) - .put(new Info("c10::prim::requires_grad").javaNames("requires_grad")) - .put(new Info("c10::fetch_and_cast").javaNames("fetch_and_cast_qint8")) - .put(new Info("c10::cast_and_store").javaNames("cast_and_store_qint8")) - .put(new Info("c10::fetch_and_cast").javaNames("fetch_and_cast_quint8")) - .put(new Info("c10::cast_and_store").javaNames("cast_and_store_quint8")) - .put(new Info("c10::fetch_and_cast").javaNames("fetch_and_cast_qint32")) - .put(new Info("c10::cast_and_store").javaNames("cast_and_store_qint32")) - .put(new 
Info("c10::fetch_and_cast").javaNames("fetch_and_cast_quint4x2")) - .put(new Info("c10::cast_and_store").javaNames("cast_and_store_quint4x2")) - .put(new Info("c10::aten::clone").javaNames("_clone")) - .put(new Info("c10::TensorOptions").javaNames("TensorOptions")) - .put(new Info("c10::detail::_str").javaNames("_strCompileTimeEmptyString")) - .put(new Info("at::TensorBase").base("AbstractTensor").pointerTypes("TensorBase")) - .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_byte")) - .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_short")) - .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_int")) - .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_long")) - .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_float")) - .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_double")) - .put(new Info("at::Tensor::item").javaNames("item_byte")) - .put(new Info("at::Tensor::item").javaNames("item_short")) - .put(new Info("at::Tensor::item").javaNames("item_int")) - .put(new Info("at::Tensor::item").javaNames("item_long")) - .put(new Info("at::Tensor::item").javaNames("item_float")) - .put(new Info("at::Tensor::item").javaNames("item_double")) + sharedMap(infoMap); + + infoMap + .put(new Info("ordered_dict.h").linePatterns(".*class Item;.*").skip()) + .put(new Info("util.h").linePatterns(".*using approx_time_t = decltype.*").skip()) + + .put(new Info().javaText("import org.bytedeco.pytorch.Allocator;")) + .put(new Info().javaText("import org.bytedeco.pytorch.Function;")) + .put(new Info().javaText("import org.bytedeco.pytorch.functions.*;")) + .put(new Info().javaText("import org.bytedeco.pytorch.Module;")) + .put(new Info().javaText("import org.bytedeco.javacpp.annotation.Cast;")) + + .put(new Info("basic/containers").cppTypes("c10::optional", "torch::optional", "c10::variant")) + .put(new Info("std::nullptr_t").cast().pointerTypes("PointerPointer")) + + .put(new Info("at::CheckedFrom").cast().valueTypes("BytePointer", "String").pointerTypes("PointerPointer")) // Alias to const char* + .put(new Info("c10::IValue", "at::IValue", "decltype(auto)").pointerTypes("IValue")) + // .put(new Info("c10::IValue::operator ==").skip()) // Possible name conflict with IValue.equals + .put(new Info("std::size_t", "c10::Dict::size_type", + "c10::Dict::size_type").cast().valueTypes("long").pointerTypes("SizeTPointer")) + .put(new Info("approx_time_t").cast().valueTypes("long").pointerTypes("LongPointer")) + .put(new Info( + "torch::ExpandingArray<1>", "torch::ExpandingArray<2>", "torch::ExpandingArray<3>", "torch::ExpandingArray<4>", + "torch::ExpandingArray", "torch::ExpandingArray<1*2>", "torch::ExpandingArray<2*2>", "torch::ExpandingArray<3*2>").cast().pointerTypes("LongPointer")) + .put(new Info("torch::ExpandingArray<1,double>", "torch::ExpandingArray<2,double>", "torch::ExpandingArray<3,double>").cast().pointerTypes("DoublePointer")) + .put(new Info("torch::ExpandingArrayWithOptionalElem<2>", "torch::ExpandingArrayWithOptionalElem<3>").cast().pointerTypes("LongOptional")) + .put(new Info("std::pair").pointerTypes("EnumNameValue").define()) + .put(new Info("c10::ClassType::Property").pointerTypes("ClassType.Property")) + + .put(new Info("std::list >").pointerTypes("RecordFunctionHandleIntList").define()) + .put(new Info("at::RecordFunctionHandle").valueTypes("long")) + .put(new Info("c10::ivalue::Future::FutureError::FutureError").skip()) // This constructor takes a std::string&& but parser sends a std::string& + .put(new Info("operator 
const std::string&()").javaText( // Hopefully targets the one in ConstantString only + "public native @Const @ByRef @Name(\"operator const std::string&\") @StdString @Override String toString();" + )) + .put(new Info("c10::weak_intrusive_ptr").pointerTypes("WeakStorage")) + + .put(new Info("torch::monitor::Stat").pointerTypes("DoubleStat")) + .put(new Info("torch::monitor::Stat").pointerTypes("LongStat")) + .put(new Info("torch::jit::generic_graph_node_list").pointerTypes("graph_node_list")) + .put(new Info("torch::jit::generic_graph_node_list_iterator").pointerTypes("graph_node_list_iterator")) + .put(new Info("torch::autograd::Function").pointerTypes("FunctionCrossMapLRN2d")) + + .put(new Info("strong::type,strong::hashable>").pointerTypes("Pointer")) + + .put(new Info("c10::VaryingShape").pointerTypes("LongVaryingShape")) + .put(new Info("c10::VaryingShape").pointerTypes("StrideVaryingShape")) + .put(new Info("torch::detail::SelectiveStr").pointerTypes("DisabledStr")) + .put(new Info("torch::detail::SelectiveStr").pointerTypes("EnabledStr")) + .put(new Info("torch::detail::SelectiveStr::operator const char*", + "torch::detail::SelectiveStr::operator const char*"). + javaText("public native @Name(\"operator const char*\") @Cast(\"const char*\") BytePointer asBytePointer();"))// Fixes bug where constexpr prevents addition of const in @Name + .put(new Info("fbgemm::bfloat16", "__nv_bfloat16", "sycl::ext::oneapi::bfloat16").pointerTypes("BFloat16").valueTypes("short", "short", "short")) + .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)").cast().valueTypes("boolean").pointerTypes("BoolPointer")) + .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Half>::t)").pointerTypes("Half")) + .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::BFloat16>::t)").pointerTypes("BFloat16")) + .put(new Info("c10::DataPtr", "at::DataPtr").valueTypes("@Cast({\"\", \"c10::DataPtr&&\"}) @StdMove DataPtr").pointerTypes("DataPtr")) + .put(new Info("c10::Storage", "at::Storage").valueTypes("@Cast({\"\", \"c10::Storage&&\"}) @StdMove Storage").pointerTypes("Storage")) + .put(new Info("c10::ClassType").purify().pointerTypes("ClassType")) // Issue #669 + .put(new Info("c10::EnumType").purify().pointerTypes("EnumType")) // Issue #669 + .put(new Info("c10::NamedType").purify().pointerTypes("NamedType")) // Issue #669 + // See comments in PR#668 about a const-agnostic adapter + .put(new Info("std::unique_ptr").annotations("@UniquePtr") + .valueTypes("@Cast({\"\", \"std::unique_ptr&&\"}) FunctionSchema") + .pointerTypes("FunctionSchema")) + .put(new Info("c10::MaybeOwned").valueTypes("@Cast({\"\", \"c10::MaybeOwned&&\"}) @StdMove TensorMaybeOwned").pointerTypes("TensorMaybeOwned")) + .put(new Info("c10::MaybeOwned").valueTypes("@Cast({\"\", \"c10::MaybeOwned&&\"}) @StdMove TensorBaseMaybeOwned").pointerTypes("TensorBaseMaybeOwned")) + .put(new Info("c10::MaybeOwnedTraits").pointerTypes("MaybeOwnedTraitsTensor")) + .put(new Info("c10::MaybeOwnedTraitsGenericImpl >").pointerTypes("MaybeOwnedTraitsGenericImplTensor")) + .put(new Info("at::InferExpandGeometryResult").pointerTypes("DimVectorInferExpandGeometryResult")) + .put(new Info("at::namedinference::TensorName").valueTypes("@Cast({\"\", \"at::namedinference::TensorName&&\"}) @StdMove TensorName").pointerTypes("TensorName")) + .put(new Info("c10::remove_symint::type").valueTypes("long")) + .put(new Info("std::aligned_storage_t").pointerTypes("Pointer")) + .put(new 
Info("c10::TensorImpl::identity").pointerTypes("SymIntIdentity")) + .put(new Info("c10::TensorImpl::identity").pointerTypes("LongIdentity")) + .put(new Info("c10::requires_grad", "at::range", "at::bernoulli_out", "at::normal_out", "at::stft").skipDefaults()) + .put(new Info("c10::prim::requires_grad").javaNames("requires_grad")) + .put(new Info("c10::aten::clone").javaNames("_clone")) + .put(new Info("c10::TensorOptions").javaNames("TensorOptions")) + .put(new Info("c10::detail::_str").javaNames("_strCompileTimeEmptyString")) + .put(new Info("at::TensorBase").base("AbstractTensor").pointerTypes("TensorBase")) + ; -// .put(new Info("c10::complex").pointerTypes("DoubleComplex").define()) -// .put(new Info("c10::complex").pointerTypes("FloatComplex").define()) -// .put(new Info("c10::complex").pointerTypes("HalfComplex").define()) -// .put(new Info("c10::complex::real", "c10::complex::imag", -// "c10::complex::real", "c10::complex::imag", -// "c10::complex::real", "c10::complex::imag").annotations("@Function")) - .put(new Info("c10::ArrayRef", "c10::ArrayRef", "c10::ArrayRef").cast().pointerTypes("ByteArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("BytePointer")) - .put(new Info("c10::ArrayRef", "c10::ArrayRef", "c10::ArrayRef").cast().pointerTypes("ShortArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("ShortPointer")) - .put(new Info("c10::ArrayRef", "c10::ArrayRef", "c10::ArrayRef", "c10::ArrayRef", - "c10::ArrayRef", "at::ArrayRef").cast().pointerTypes("IntArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("IntPointer")) - .put(new Info("c10::ArrayRef", "c10::IntArrayRef", "at::IntArrayRef", "c10::OptionalArray") - .pointerTypes("@Cast(\"c10::ArrayRef*\") LongArrayRef", "@Cast({\"int64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long...")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("LongPointer")) - .put(new Info("c10::ArrayRef").pointerTypes("FloatArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("FloatPointer")) - .put(new Info("c10::ArrayRef", "c10::OptionalArray").pointerTypes("DoubleArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("DoublePointer")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").pointerTypes("SizeTArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("SizeTPointer")) - .put(new Info("c10::ArrayRef", "c10::SymIntArrayRef").pointerTypes("SymIntRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("SymInt")) - .put(new Info("c10::ArrayRef", "c10::ArrayRef").pointerTypes("SymNodeRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("Pointer")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").pointerTypes("StringArrayRef").purify()) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").pointerTypes("@Cast({\"\", \"std::string*\"}) @StdString BytePointer")) - .put(new Info("c10::ArrayRef::t)>").pointerTypes("BoolArrayRef")) - .put(new Info("c10::ArrayRef::t)>::iterator", - "c10::ArrayRef::t)>::const_iterator").cast().pointerTypes("BoolPointer")) - .put(new Info("c10::ArrayRef::t)>").pointerTypes("HalfArrayRef")) - .put(new 
Info("c10::ArrayRef::t)>::iterator", - "c10::ArrayRef::t)>::const_iterator").cast().pointerTypes("ShortPointer")) - .put(new Info("c10::ArrayRef::t)>").pointerTypes("BFloat16ArrayRef")) - .put(new Info("c10::ArrayRef::t)>::iterator", - "c10::ArrayRef::t)>::const_iterator").cast().pointerTypes("ShortPointer")) - .put(new Info("c10::ArrayRef >", "at::ArrayRef >").pointerTypes("FloatComplexrrayRef")) - .put(new Info("c10::ArrayRef >::iterator", "c10::ArrayRef >::const_iterator").cast().pointerTypes("FloatPointer")) - .put(new Info("c10::ArrayRef >", "at::ArrayRef >").pointerTypes("DoubleComplexrrayRef")) - .put(new Info("c10::ArrayRef >::iterator", "c10::ArrayRef >::const_iterator").cast().pointerTypes("DoublePointer")) - .put(new Info("c10::ArrayRef", "at::ArrayRef", "at::ArrayRef").pointerTypes("ScalarTypeArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("BytePointer")) - .put(new Info("c10::ArrayRef", "at::ArrayRef", "c10::ArrayRef").cast().pointerTypes("IValueArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("IValue")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").pointerTypes("EnumNameValueArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("EnumNameValue")) - .put(new Info("c10::ArrayRef", "at::ArrayRef", "at::ArrayRef").pointerTypes("TypeArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("Type")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").pointerTypes("SymbolArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("Symbol")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").pointerTypes("StrideArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("Stride")) - .put(new Info("c10::ArrayRef", "at::DimnameList").pointerTypes("DimnameArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("Dimname")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").pointerTypes("ScalarArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("Scalar")) - .put(new Info("c10::ArrayRef", "at::ArrayRef", "at::TensorList", "at::ITensorListRef").pointerTypes("TensorArrayRef")) - .put(new Info("c10::ArrayRef(std::vector&)").javaText( - "public TensorArrayRef(@ByRef TensorVector Vec) { super((Pointer)null); allocate(Vec); }\n" - + "private native void allocate(@ByRef TensorVector Vec);")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("Tensor")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").pointerTypes("TensorArgArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("TensorArg")) - .put(new Info("c10::ArrayRef").pointerTypes("TensorIndexArrayRef")) - .put(new Info("c10::ArrayRef(std::vector&)").javaText( - "public TensorIndexArrayRef(@ByRef TensorIndexVector Vec) { super((Pointer)null); allocate(Vec); }\n" - + "private native void allocate(@ByRef TensorIndexVector Vec);")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("TensorIndex")) - .put(new Info("c10::ArrayRef >", "at::ArrayRef >").pointerTypes("TensorOptionalArrayRef")) - .put(new Info("c10::ArrayRef >::iterator", "c10::ArrayRef 
>::const_iterator").cast().pointerTypes("TensorOptional")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").pointerTypes("SavedVariableArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("SavedVariable")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").pointerTypes("SugaredValueArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").annotations("@SharedPtr").pointerTypes("SugaredValue")) - .put(new Info("c10::ArrayRef", "at::ArrayRef", "at::ArrayRef").pointerTypes("NamedValueArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("NamedValue")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").purify().pointerTypes("BlockArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("Block")) - .put(new Info("c10::ArrayRef", "at::ArrayRef").purify().pointerTypes("ValueArrayRef")) - .put(new Info("c10::ArrayRef::iterator", "c10::ArrayRef::const_iterator").cast().pointerTypes("Value")) - .put(new Info("c10::ArrayRef::equals", "c10::ArrayRef::equals", - "c10::ArrayRef::equals", "c10::ArrayRef::equals", - "c10::ArrayRef >::equals", "c10::ArrayRef::equals", - "c10::ArrayRef::equals", "c10::ArrayRef::vec", - "std::array").skip()) - .put(new Info("c10::VaryingShape").pointerTypes("LongVaryingShape")) - .put(new Info("c10::VaryingShape").pointerTypes("StrideVaryingShape")) - - .put(new Info("std::hash").pointerTypes("DeviceTypeHash")) - .put(new Info("std::hash").pointerTypes("DeviceHash")) - .put(new Info("std::hash").pointerTypes("StreamHash")) - .put(new Info("std::hash").pointerTypes("SymbolHash")) - .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)").cast().valueTypes("boolean").pointerTypes("BoolPointer")) - .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Half>::t)").pointerTypes("Half")) - .put(new Info("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::BFloat16>::t)").pointerTypes("BFloat16")) - .put(new Info("c10::DataPtr", "at::DataPtr").valueTypes("@Cast({\"\", \"c10::DataPtr&&\"}) @StdMove DataPtr").pointerTypes("DataPtr")) - .put(new Info("c10::Storage", "at::Storage").valueTypes("@Cast({\"\", \"c10::Storage&&\"}) @StdMove Storage").pointerTypes("Storage")) - .put(new Info("std::shared_ptr").annotations("@SharedPtr").pointerTypes("ClassType")) - .put(new Info("std::shared_ptr", "c10::EnumTypePtr").annotations("@SharedPtr").pointerTypes("EnumType")) - .put(new Info("std::shared_ptr").annotations("@SharedPtr").pointerTypes("NamedType")) - .put(new Info("std::shared_ptr").annotations("@SharedPtr") - .valueTypes("@Cast({\"\", \"\", \"std::shared_ptr&&\"}) NamedType") - .pointerTypes("NamedType")) - .put(new Info("std::shared_ptr").annotations("@SharedPtr").pointerTypes("Type")) - .put(new Info("std::shared_ptr", "c10::TensorTypePtr", "at::TensorTypePtr").annotations("@SharedPtr").pointerTypes("TensorType")) - .put(new Info("std::unique_ptr").annotations("@UniquePtr") - .valueTypes("@Cast({\"\", \"std::unique_ptr&&\"}) FunctionSchema") - .pointerTypes("FunctionSchema")) - .put(new Info("c10::IdWrapper", "at::IdWrapper").pointerTypes("TypeIdentifierIdWrapper")) - .put(new Info("c10::MaybeOwned").valueTypes("@Cast({\"\", \"c10::MaybeOwned&&\"}) @StdMove TensorMaybeOwned").pointerTypes("TensorMaybeOwned")) - .put(new Info("c10::SmallVectorTemplateCommon").pointerTypes("Pointer")) - .put(new 
Info("c10::SmallVectorTemplateBase").pointerTypes("SmallVectorBase")) - .put(new Info("c10::SmallVectorImpl").pointerTypes("DimVectorImpl")) - .put(new Info("c10::SmallVectorImpl::size_type", "c10::SmallVectorImpl::ValueParamT").valueTypes("long")) - .put(new Info("c10::SmallVectorImpl::iterator", "c10::SmallVectorImpl::const_iterator").cast().pointerTypes("LongPointer")) - .put(new Info("c10::SmallVector", "at::DimVector").pointerTypes("DimVector")) - .put(new Info("c10::SmallVector(c10::SmallVectorImpl&&)", - "c10::SmallVector::operator =(c10::SmallVectorImpl&&)").skip()) - .put(new Info("c10::SmallVectorTemplateCommon").pointerTypes("Pointer")) - .put(new Info("c10::SmallVectorTemplateBase").pointerTypes("SymSmallVectorBase")) - .put(new Info("c10::SmallVectorImpl").pointerTypes("SymDimVectorImpl")) - .put(new Info("c10::SmallVectorImpl::size_type", "c10::SmallVectorImpl::ValueParamT").valueTypes("long")) - .put(new Info("c10::SmallVectorImpl::iterator", "c10::SmallVectorImpl::const_iterator").cast().pointerTypes("SymInt")) - .put(new Info("c10::SmallVector", "at::SymDimVector").pointerTypes("SymDimVector")) - .put(new Info("c10::SmallVector(c10::SmallVectorImpl&&)", - "c10::SmallVector::operator =(c10::SmallVectorImpl&&)").skip()) - .put(new Info("c10::SymIntArrayRef::iterator", "c10::SymIntArrayRef::const_iterator").cast().pointerTypes("SymInt")) - .put(new Info("c10::EnumerationType").pointerTypes("LayoutEnumerationType")) - .put(new Info("c10::EnumerationType").pointerTypes("ScalarTypeEnumerationType")) - .put(new Info("c10::EnumerationType").pointerTypes("MemoryFormattEnumerationType")) - .put(new Info("c10::SingleElementType").pointerTypes("AwaitSingleElementType")) - .put(new Info("c10::SingleElementType").pointerTypes("ListSingleElementType")) - .put(new Info("c10::SingleElementType").pointerTypes("RRefSingleElementType")) - .put(new Info("c10::SingleElementType").pointerTypes("FutureSingleElementType")) - .put(new Info("c10::SingleElementType").pointerTypes("OptionalSingleElementType")) - .put(new Info("at::InferExpandGeometryResult").pointerTypes("DimVectorInferExpandGeometryResult")) - .put(new Info("at::namedinference::TensorName").valueTypes("@Cast({\"\", \"at::namedinference::TensorName&&\"}) @StdMove TensorName").pointerTypes("TensorName")) - .put(new Info("std::shared_ptr").annotations("@SharedPtr").valueTypes("FunctionPreHook").pointerTypes("FunctionPreHook")) - .put(new Info("std::unique_ptr").annotations("@UniquePtr") - .valueTypes("@Cast({\"\", \"std::unique_ptr&&\"}) FunctionPreHook") - .pointerTypes("FunctionPreHook")) - .put(new Info("std::unique_ptr").annotations("@UniquePtr") - .valueTypes("@Cast({\"\", \"std::unique_ptr&&\"}) FunctionPostHook") - .pointerTypes("FunctionPostHook")) - .put(new Info("const std::unordered_map >", - "std::unordered_map >").pointerTypes("IntFunctionPreHookMap").define()) -// .put(new Info("torch::jit::ScalarAttributeValue,torch::jit::AttributeKind::c>").pointerTypes("ComplexAttr")) -// .put(new Info("torch::jit::VectorAttributeValue,torch::jit::AttributeKind::cs>").pointerTypes("ComplexValsAttr")) - .put(new Info("torch::jit::ComplexAttr::ConstructorType", "torch::jit::ComplexAttr::ValueType").cast().pointerTypes("DoublePointer")) - .put(new Info("torch::jit::ComplexValsAttr::ConstructorType", "torch::jit::ComplexValsAttr::ValueType").cast().pointerTypes("Pointer")) -// .put(new Info("torch::jit::ScalarAttributeValue").pointerTypes("FloatAttr")) -// .put(new Info("torch::jit::VectorAttributeValue").pointerTypes("FloatsAttr")) - 
.put(new Info("torch::jit::FloatAttr::ConstructorType", "torch::jit::FloatAttr::ValueType").cast().valueTypes("double").pointerTypes("DoublePointer")) - .put(new Info("torch::jit::FloatsAttr::ConstructorType", "torch::jit::FloatsAttr::ValueType").cast().pointerTypes("DoubleVector")) -// .put(new Info("torch::jit::ScalarAttributeValue").pointerTypes("IntAttr")) -// .put(new Info("torch::jit::VectorAttributeValue").pointerTypes("IntsAttr")) - .put(new Info("torch::jit::IntAttr::ConstructorType", "torch::jit::IntAttr::ValueType").cast().valueTypes("long").pointerTypes("LongPointer")) - .put(new Info("torch::jit::IntsAttr::ConstructorType", "torch::jit::IntsAttr::ValueType").cast().pointerTypes("LongVector")) -// .put(new Info("torch::jit::ScalarAttributeValue").pointerTypes("StringAttr")) -// .put(new Info("torch::jit::VectorAttributeValue").pointerTypes("StringsAttr")) - .put(new Info("torch::jit::StringAttr::ConstructorType", "torch::jit::StringAttr::ValueType").annotations("@StdString").pointerTypes("BytePointer")) - .put(new Info("torch::jit::StringsAttr::ConstructorType", "torch::jit::StringsAttr::ValueType").cast().pointerTypes("StringVector")) -// .put(new Info("torch::jit::ScalarAttributeValue").pointerTypes("TensorAttr")) -// .put(new Info("torch::jit::VectorAttributeValue").pointerTypes("TensorsAttr")) - .put(new Info("torch::jit::TensorAttr::ConstructorType", "torch::jit::TensorAttr::ValueType").cast().pointerTypes("Tensor")) - .put(new Info("torch::jit::TensorsAttr::ConstructorType", "torch::jit::TensorsAttr::ValueType").cast().pointerTypes("TensorVector")) -// .put(new Info("torch::jit::ScalarAttributeValue").pointerTypes("TypeAttr")) -// .put(new Info("torch::jit::VectorAttributeValue").pointerTypes("TypesAttr")) - .put(new Info("torch::jit::TypeAttr::ConstructorType", "torch::jit::TypeAttr::ValueType").cast().pointerTypes("Type.TypePtr")) - .put(new Info("torch::jit::TypesAttr::ConstructorType", "torch::jit::TypesAttr::ValueType").cast().pointerTypes("TypeVector")) -// .put(new Info("torch::jit::ScalarAttributeValue").pointerTypes("IValueAttr")) - .put(new Info("torch::jit::IValueAttr::ConstructorType", "torch::jit::IValueAttr::ValueType").cast().pointerTypes("IValue")) - .put(new Info("std::shared_ptr").annotations("@SharedPtr").pointerTypes("Graph")) - .put(new Info("std::shared_ptr").annotations("@SharedPtr").pointerTypes("Operator")) - .put(new Info("std::shared_ptr", "torch::jit::ResolverPtr").annotations("@SharedPtr").pointerTypes("Resolver")) - .put(new Info("std::shared_ptr", "torch::jit::SugaredValuePtr").annotations("@SharedPtr").pointerTypes("SugaredValue")) - .put(new Info("std::shared_ptr").annotations("@SharedPtr") - .valueTypes("@Cast(\"const torch::jit::CompilationUnit*\") CompilationUnit") - .pointerTypes("CompilationUnit")) - .put(new Info("std::unique_ptr", "Ptr").annotations("@UniquePtr").pointerTypes("AttributeValue")) - .put(new Info("std::unique_ptr", "TokenTriePtr").annotations("@UniquePtr").pointerTypes("TokenTrie")) - .put(new Info("torch::jit::TokenTrie").immutable()) - .put(new Info("torch::cuda::device_count").javaNames("cuda_device_count")) - .put(new Info("torch::cuda::is_available").javaNames("cuda_is_available")) - .put(new Info("torch::cuda::manual_seed").javaNames("cuda_manual_seed")) - .put(new Info("torch::cuda::manual_seed_all").javaNames("cuda_manual_seed_all")) - .put(new Info("torch::cuda::synchronize").javaNames("cuda_synchronize")) - .put(new Info("torch::jit::Const").pointerTypes("ConstExpr")) - .put(new 
Info("torch::jit::Node").pointerTypes("JitNode")) - .put(new Info("torch::jit::Module").pointerTypes("JitModule")) - .put(new Info("torch::jit::Object").pointerTypes("JitObject")) - .put(new Info("torch::jit::String").pointerTypes("JitString")) - .put(new Info("torch::jit::generic_graph_node_list").pointerTypes("graph_node_list")) - .put(new Info("torch::jit::generic_graph_node_list_iterator").pointerTypes("graph_node_list_iterator")) - - .put(new Info("torch::jit::slot_list_impl", "torch::jit::module_list").pointerTypes("module_list")) - .put(new Info("torch::jit::slot_iterator_impl").pointerTypes("module_iterator")) - .put(new Info("torch::jit::slot_iterator_impl::value_type").pointerTypes("JitModule")) - .put(new Info("torch::jit::Named").pointerTypes("NamedJitModule")) - .put(new Info("torch::jit::detail::NamedPolicy").pointerTypes("NamedModulePolicy")) - .put(new Info("torch::jit::slot_list_impl >", "torch::jit::named_module_list").pointerTypes("named_module_list")) - .put(new Info("torch::jit::slot_iterator_impl >").pointerTypes("named_module_iterator")) - .put(new Info("torch::jit::slot_iterator_impl >::value_type").pointerTypes("NamedJitModule")) - - .put(new Info("torch::jit::slot_list_impl", "torch::jit::parameter_list").pointerTypes("parameter_list")) - .put(new Info("torch::jit::slot_iterator_impl").pointerTypes("parameter_iterator")) - .put(new Info("torch::jit::slot_iterator_impl::value_type").pointerTypes("Tensor")) - .put(new Info("torch::jit::Named").pointerTypes("NamedTensor")) - .put(new Info("torch::jit::detail::NamedPolicy").pointerTypes("NamedParameterPolicy")) - .put(new Info("torch::jit::slot_list_impl >", "torch::jit::named_parameter_list").pointerTypes("named_parameter_list")) - .put(new Info("torch::jit::slot_iterator_impl >").pointerTypes("named_parameter_iterator")) - .put(new Info("torch::jit::slot_iterator_impl >::value_type").pointerTypes("NamedTensor")) - - .put(new Info("torch::jit::slot_list_impl", "torch::jit::attribute_list").pointerTypes("attribute_list")) - .put(new Info("torch::jit::slot_iterator_impl").pointerTypes("attribute_iterator")) - .put(new Info("torch::jit::slot_iterator_impl::value_type").pointerTypes("IValue")) - .put(new Info("torch::jit::Named").pointerTypes("NamedIValue")) - .put(new Info("torch::jit::detail::NamedPolicy").pointerTypes("NamedAttributePolicy")) - .put(new Info("torch::jit::slot_list_impl >", "torch::jit::named_attribute_list").pointerTypes("named_attribute_list")) - .put(new Info("torch::jit::slot_iterator_impl >").pointerTypes("named_attribute_iterator")) - .put(new Info("torch::jit::slot_iterator_impl >::value_type").pointerTypes("NamedIValue")) - - .put(new Info("torch::jit::slot_list_impl", "torch::jit::buffer_list").pointerTypes("buffer_list")) - .put(new Info("torch::jit::slot_iterator_impl").pointerTypes("buffer_iterator")) - .put(new Info("torch::jit::slot_iterator_impl::value_type").pointerTypes("Tensor")) - .put(new Info("torch::jit::Named").pointerTypes("NamedTensor")) - .put(new Info("torch::jit::detail::NamedPolicy").pointerTypes("NamedBufferPolicy")) - .put(new Info("torch::jit::slot_list_impl >", "torch::jit::named_buffer_list").pointerTypes("named_buffer_list")) - .put(new Info("torch::jit::slot_iterator_impl >").pointerTypes("named_buffer_iterator")) - .put(new Info("torch::jit::slot_iterator_impl >::value_type").pointerTypes("NamedTensor")) - - .put(new Info("torch::jit::tracer::warn_fn_type", "warn_fn_type").cast().pointerTypes("warn_fn_type")) - .put(new 
Info("torch::jit::Maybe").pointerTypes("DefMaybe")) - .put(new Info("torch::jit::Maybe").pointerTypes("ExprMaybe")) - .put(new Info("torch::jit::Maybe").pointerTypes("VarMaybe")) - .put(new Info("torch::jit::Compound::map", "torch::jit::Tree::map", "torch::jit::Maybe::map", - "torch::jit::Maybe::map", "torch::jit::Maybe::map").skip()) - .put(new Info("torch::jit::Wrap").pointerTypes("BlockWrap")) - .put(new Info("torch::jit::Wrap").pointerTypes("JitNodeWrap")) - .put(new Info("torch::jit::Wrap").pointerTypes("ValueWrap")); + //// Enumerations + infoMap + .put(new Info("c10::ScalarType", "at::ScalarType", "torch::Dtype").enumerate().valueTypes("ScalarType").pointerTypes("@Cast(\"c10::ScalarType*\") BytePointer")) + .put(new Info("torch::jit::AttributeKind").enumerate().valueTypes("JitAttributeKind")) + .put(new Info("torch::jit::PickleOpCode").enumerate().translate(false).valueTypes("PickleOpCode")) + ; + + //// c10::optional + infoMap + .put(new Info("c10::optional").pointerTypes("BoolOptional").define()) + .put(new Info("c10::optional", "c10::optional").pointerTypes("ByteOptional").define()) + .put(new Info("c10::optional", "c10::optional").pointerTypes("IntOptional").define()) + .put(new Info("c10::optional", "c10::remove_symint >::type").pointerTypes("LongOptional").define()) + .put(new Info("c10::optional").pointerTypes("FloatOptional").define()) + .put(new Info("c10::optional").pointerTypes("DoubleOptional").define()) + .put(new Info("c10::optional").pointerTypes("SizeTOptional").define()) + .put(new Info("c10::optional").pointerTypes("StringOptional").define()) + .put(new Info("c10::optional >").pointerTypes("BoolVectorOptional").define()) + .put(new Info("c10::optional >").pointerTypes("LongVectorOptional").define()) + .put(new Info("c10::optional >").pointerTypes("DoubleVectorOptional").define()) + .put(new Info("c10::optional >").pointerTypes("SizeTVectorOptional").define()) + .put(new Info("c10::optional >").pointerTypes("StringVectorOptional").define()) + .put(new Info("c10::optional >").pointerTypes("StrideVectorOptional").define()) + .put(new Info("c10::optional >").pointerTypes("ShapeSymbolVectorOptional").define()) + .put(new Info("c10::optional >").pointerTypes("TensorVectorOptional").define()) + .put(new Info("c10::optional", "c10::optional", "c10::optional").pointerTypes("DeviceOptional").define()) + .put(new Info("c10::optional >", "c10::optional", "c10::optional", + "c10::OptionalArrayRef", "c10::OptionalIntArrayRef", "at::OptionalIntArrayRef", "c10::remove_symint::type") + // This second pointer type prevents optional.swap to work. I don't know exactly why. Skipping swap for now. 
+ .pointerTypes("LongArrayRefOptional", "@Cast({\"int64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long...").define()) + .put(new Info("c10::optional >::swap").skip()) + .put(new Info("c10::optional >", "c10::optional >", + "c10::OptionalArrayRef").pointerTypes("DoubleArrayRefOptional").define()) + .put(new Info("c10::optional >", "c10::optional >", + "c10::OptionalArrayRef", "c10::OptionalSymIntArrayRef", "at::OptionalSymIntArrayRef", "c10::optional").pointerTypes("SymIntArrayRefOptional").define()) + .put(new Info("c10::optional", "c10::optional").pointerTypes("LayoutOptional").define()) + .put(new Info("c10::optional", "c10::optional").pointerTypes("MemoryFormatOptional").define()) + .put(new Info("c10::optional", "c10::optional").pointerTypes("ScalarOptional").define()) + .put(new Info("c10::optional", "c10::optional", "c10::optional").pointerTypes("ScalarTypeOptional").define()) + .put(new Info("c10::optional").pointerTypes("AliasInfoOptional").define()) + .put(new Info("c10::optional").pointerTypes("IValueOptional").define()) + .put(new Info("c10::optional").pointerTypes("CppSignatureOptional").define()) + .put(new Info("c10::optional").pointerTypes("DispatchKeyOptional").define()) + .put(new Info("c10::optional").pointerTypes("OperatorHandleOptional").define()) + .put(new Info("c10::optional").pointerTypes("OperatorNameOptional").define()) + .put(new Info("c10::optional").pointerTypes("QualifiedNameOptional").define()) + .put(new Info("c10::optional").pointerTypes("StreamOptional").define()) + .put(new Info("c10::optional").pointerTypes("StrideOptional").define()) + .put(new Info("c10::optional").pointerTypes("TypePtrOptional").define()) + .put(new Info("c10::optional").pointerTypes("ClassTypePropertyOptional").define()) + .put(new Info("c10::optional").pointerTypes("AliasTypeSetOptional").define()) + .put(new Info("c10::optional").pointerTypes("FunctionSchemaOptional").define()) + .put(new Info("c10::optional", "c10::optional").pointerTypes("SymDimVectorOptional").define()) + .put(new Info("c10::optional").pointerTypes("SymIntOptional").define()) + .put(new Info("c10::optional").pointerTypes("IValueOptional").define()) + .put(new Info("c10::optional").pointerTypes("DimVectorOptional").define()) + .put(new Info("c10::optional").pointerTypes("DimnameOptional").define()) + .put(new Info("c10::optional").pointerTypes("DimnameListOptional").define()) + .put(new Info("c10::optional").pointerTypes("GeneratorOptional").define()) + .put(new Info("c10::optional", "c10::optional", "c10::optional", "c10::optional", "c10::optional").pointerTypes("TensorOptional").define()) + .put(new Info("c10::optional", "c10::optional").pointerTypes("TensorArrayRefOptional").define()) + .put(new Info("c10::optional").pointerTypes("ThreadLocalStateOptional").define()) + .put(new Info("c10::optional").pointerTypes("TypeMetaOptional").define()) + .put(new Info("c10::optional").pointerTypes("ExecutorExecutionModeOptional").define()) + .put(new Info("c10::optional::operator ->").skip()) // Returns a pointer to ExecutorExecutionMode, which is an enum + .put(new Info("c10::optional", + "c10::optional").cast().pointerTypes("InlinedCallStackOptional").define()) + .put(new Info("c10::optional", + "c10::optional").cast().pointerTypes("ScopeOptional").define()) + .put(new Info("c10::optional").pointerTypes("ModuleInstanceInfoOptional").define()) + .put(new Info("c10::optional").pointerTypes("SourceRangeOptional").define()) + .put(new Info("c10::optional").pointerTypes("MethodOptional").define()) + .put(new 
Info("c10::optional").pointerTypes("OperatorOptional").define()) + .put(new Info("c10::optional", "c10::optional").pointerTypes("NamedValueOptional").define()) + .put(new Info("c10::optional").pointerTypes("ValueOptional").define()) + .put(new Info("c10::optional >", + "c10::optional >", + "c10::optional >").cast().pointerTypes("LongExpandingArrayOptional").define()) + .put(new Info("c10::optional >", + "c10::optional >", + "c10::optional >", + "c10::optional::ExpandingArrayDouble>", + "c10::optional::ExpandingArrayDouble>", + "c10::optional::ExpandingArrayDouble>").cast().pointerTypes("DoubleExpandingArrayOptional").define()) + .put(new Info("c10::optional >").pointerTypes("T_StringSizeTSizeT_TOptional").define()) + .put(new Info("torch::optional >").pointerTypes("T_TensorTensor_TOptional").define()) + .put(new Info("c10::optional >", "c10::optional >").pointerTypes("T_TypePtrLong_TOptional").cast().define()) + ; + + + //// Singleton + infoMap + .put(new Info("c10::Type::SingletonOrSharedTypePtr", "c10::TypePtr", "c10::Type::TypePtr", "at::TypePtr", + "torch::jit::TypeAttr::ConstructorType", "torch::jit::TypeAttr::ValueType").pointerTypes("Type.TypePtr")) // No way to move it outside Type class + .put(new Info("c10::SingletonTypePtr").pointerTypes("SingletonTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("AnyTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("AnyEnumTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("NumberTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("FloatTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("ComplexTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("IntTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("BoolTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("StringTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("StorageTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("NoneTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("GeneratorTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("QuantizerTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("QSchemeTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("DeviceObjTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("StreamObjTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("CapsuleTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("PyObjectTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("LayoutTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("ScalarTypeTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("AnyListTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("AnyTupleTypePtr")) + .put(new Info("c10::SingletonTypePtr").pointerTypes("AnyClassTypePtr")) + ; + + + //// c10::variant + infoMap + .put(new Info("c10::variant", + "torch::nn::init::NonlinearityType").pointerTypes("Nonlinearity").define()) + .put(new Info("c10::variant", + "torch::nn::init::FanModeType").pointerTypes("FanModeType").define()) + + .put(new Info("c10::variant", + "torch::nn::ConvOptions<1>::padding_mode_t", + "torch::nn::ConvOptions<2>::padding_mode_t", + "torch::nn::ConvOptions<3>::padding_mode_t", + "torch::nn::ConvTransposeOptions<1>::padding_mode_t", + "torch::nn::ConvTransposeOptions<2>::padding_mode_t", + "torch::nn::ConvTransposeOptions<3>::padding_mode_t", + 
"torch::nn::detail::conv_padding_mode_t").pointerTypes("ConvPaddingMode").define()) + .put(new Info("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>", + "torch::nn::ConvOptions<1>::padding_t", + "torch::nn::detail::ConvNdOptions<1>::padding_t", + "torch::nn::functional::ConvFuncOptions<1>::padding_t", + "torch::nn::functional::Conv1dFuncOptions::padding_t").purify().pointerTypes("Conv1dPadding").define()) + .put(new Info("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>", + "torch::nn::ConvOptions<2>::padding_t", + "torch::nn::detail::ConvNdOptions<2>::padding_t", + "torch::nn::functional::ConvFuncOptions<2>::padding_t", + "torch::nn::functional::Conv2dFuncOptions::padding_t").purify().pointerTypes("Conv2dPadding").define()) + .put(new Info("c10::variant,torch::enumtype::kValid,torch::enumtype::kSame>", + "torch::nn::ConvOptions<3>::padding_t", + "torch::nn::detail::ConvNdOptions<3>::padding_t", + "torch::nn::functional::ConvFuncOptions<3>::padding_t", + "torch::nn::functional::Conv3dFuncOptions::padding_t").purify().pointerTypes("Conv3dPadding").define()) + + .put(new Info("c10::variant", + "torch::nn::EmbeddingBagMode").pointerTypes("EmbeddingBagMode").define()) + .put(new Info("c10::variant", + "torch::nn::functional::PadFuncOptions::mode_t").pointerTypes("PaddingMode").define()) + + .put(new Info("c10::variant", + "torch::nn::L1LossOptions::reduction_t", "torch::nn::functional::L1LossFuncOptions::reduction_t", + "torch::nn::MSELossOptions::reduction_t", "torch::nn::functional::MSELossFuncOptions::reduction_t", + "torch::nn::BCELossOptions::reduction_t", "torch::nn::functional::BinaryCrossEntropyFuncOptions::reduction_t", + "torch::nn::HingeEmbeddingLossOptions::reduction_t", "torch::nn::functional::HingeEmbeddingLossFuncOptions::reduction_t", + "torch::nn::MultiMarginLossOptions::reduction_t", "torch::nn::functional::MultiMarginLossFuncOptions::reduction_t", + "torch::nn::CosineEmbeddingLossOptions::reduction_t", "torch::nn::functional::CosineEmbeddingLossFuncOptions::reduction_t", + "torch::nn::MultiLabelMarginLossOptions::reduction_t", "torch::nn::functional::MultilabelMarginLossFuncOptions::reduction_t", + "torch::nn::SoftMarginLossOptions::reduction_t", "torch::nn::functional::SoftMarginLossFuncOptions::reduction_t", + "torch::nn::MultiLabelSoftMarginLossOptions::reduction_t", "torch::nn::functional::MultilabelSoftMarginLossFuncOptions::reduction_t", + "torch::nn::TripletMarginLossOptions::reduction_t", "torch::nn::functional::TripletMarginLossFuncOptions::reduction_t", + "torch::nn::TripletMarginWithDistanceLossOptions::reduction_t", "torch::nn::functional::TripletMarginWithDistanceLossFuncOptions::reduction_t", + "torch::nn::CTCLossOptions::reduction_t", "torch::nn::functional::CTCLossFuncOptions::reduction_t", + "torch::nn::SmoothL1LossOptions::reduction_t", "torch::nn::functional::SmoothL1LossFuncOptions::reduction_t", + "torch::nn::HuberLossOptions::reduction_t", "torch::nn::functional::HuberLossFuncOptions::reduction_t", + "torch::nn::PoissonNLLLossOptions::reduction_t", "torch::nn::functional::PoissonNLLLossFuncOptions::reduction_t", + "torch::nn::MarginRankingLossOptions::reduction_t", "torch::nn::functional::MarginRankingLossFuncOptions::reduction_t", + "torch::nn::NLLLossOptions::reduction_t", "torch::nn::functional::NLLLossFuncOptions::reduction_t", + "torch::nn::CrossEntropyLossOptions::reduction_t", "torch::nn::functional::CrossEntropyFuncOptions::reduction_t", + "torch::nn::BCEWithLogitsLossOptions::reduction_t", 
"torch::nn::functional::BinaryCrossEntropyWithLogitsFuncOptions::reduction_t").pointerTypes("LossReduction").define()) + .put(new Info("c10::variant", + "torch::nn::KLDivLossOptions::reduction_t", "torch::nn::functional::KLDivFuncOptions::reduction_t").pointerTypes("KLDivLossReduction").define()) + + .put(new Info("c10::variant", + "torch::nn::functional::GridSampleFuncOptions::mode_t").pointerTypes("GridSampleMode").define()) + .put(new Info("c10::variant", + "torch::nn::functional::GridSampleFuncOptions::padding_mode_t").pointerTypes("GridSamplePaddingMode").define()) + + .put(new Info("c10::variant", + "torch::nn::detail::RNNOptionsBase::rnn_options_base_mode_t").pointerTypes("RNNBaseMode").define()) + .put(new Info("c10::variant", + "torch::nn::RNNOptions::nonlinearity_t", "torch::nn::RNNCellOptions::nonlinearity_t").pointerTypes("RNNNonlinearity").define()) + + .put(new Info("c10::variant", + "torch::nn::UpsampleOptions::mode_t").pointerTypes("UpsampleMode").define()) + .put(new Info("c10::variant", + "torch::nn::functional::InterpolateFuncOptions::mode_t").pointerTypes("InterpolateMode").define()) + + .put(new Info("c10::variant >", + "torch::nn::activation_t", + "torch::nn::TransformerOptions::activation_t").pointerTypes("TransformerActivation")) // Defined explicitly + ; + + /* + * array of consecutive elements variants: + * std::array + * fixed-size + * mapped to raw pointers, with cast() + * std::vector + * variable-size array, re-allocatable + * c10::ArrayRef, defined in c10/util/ArrayRef.h + * not owning ref + * iterator is const T* => mapped to T pointer + * reverse_iterator is std::reverse_iterator => skipped + * c10::List, defined in ATen/core/List.h + * wrapper around std::vector + * (using c10::ListImpl::list_type = std::vector) + * SmallVector, defined in c10/util/SmallVector.h + * variable-size array, optimized for the case when the array is small, avoiding heap allocation + * iterator is T* or const T* => mapped to T pointer + * reverse_iterator is std::reverse_iterator => skipped + */ + + //// std::array + infoMap + .put(new Info("std::array", "std::array", "std::array").cast().pointerTypes("BoolPointer")) + .put(new Info("std::array").cast().pointerTypes("ArgumentDef")) + .put(new Info("std::array").pointerTypes("PointerPointer")) + .put(new Info("std::array").cast().pointerTypes("FunctionalityOffsetAndMask")) + .put(new Info("std::array").pointerTypes("IntPointer").cast()) + ; + + + //// std::vector + infoMap + .put(new Info("std::vector >").pointerTypes("Bool2Vector").define()) + .put(new Info("std::vector").pointerTypes("BoolVector").define()) + .put(new Info("std::vector").pointerTypes("BytePointerVector").define()) + .put(new Info("std::vector", "std::tuple,std::vector >").cast().pointerTypes("LongVector").define()) + .put(new Info("std::vector").cast().pointerTypes("DoubleVector").define()) + .put(new Info("std::vector").cast().pointerTypes("SizeTVector").define()) + .put(new Info("std::vector").pointerTypes("StringVector").define()) + .put(new Info("std::vector >").pointerTypes("StringLongVector").define()) + .put(new Info("const std::vector >", + "std::vector >").pointerTypes("RecordFunctionCallbackHandleVector").define()) + .put(new Info("std::vector", "torch::jit::Stack").pointerTypes("IValueVector").define()) + .put(new Info("std::vector::const_iterator", "torch::jit::Stack::const_iterator").pointerTypes("IValueVector.Iterator")) + .put(new Info("std::vector", "std::vector").pointerTypes("QEngineVector").define()) + .put(new 
Info("std::vector").pointerTypes("ScalarTypeVector").define()) + .put(new Info("std::vector").pointerTypes("SymbolVector").define()) + .put(new Info("std::vector >").pointerTypes("LongOptionalVector").define()) + .put(new Info("std::vector >").pointerTypes("IValueOptionalVector").define()) + .put(new Info("std::vector >", "std::vector").pointerTypes("SharedClassTypeVector").define()) + .put(new Info("std::vector >", "std::vector", + "std::vector", "c10::AliasTypeSet").pointerTypes("TypeVector").define()) + .put(new Info("const std::vector", "std::vector").valueTypes("@StdMove DimnameVector").pointerTypes("DimnameVector").define()) + .put(new Info("std::vector").pointerTypes("StrideVector").define()) + .put(new Info("std::vector").pointerTypes("ShapeSymbolVector").define()) + .put(new Info("std::vector").pointerTypes("TensorImplVector").define()) + .put(new Info("std::vector", "torch::autograd::edge_list") // Used in Node constructor + .valueTypes("@Cast({\"\", \"std::vector\"}) @StdMove EdgeVector").pointerTypes("EdgeVector").define()) + .put(new Info("std::vector", "std::vector", "std::vector", "torch::autograd::variable_list") + .valueTypes("@Cast({\"\", \"std::vector\"}) @StdMove TensorVector").pointerTypes("TensorVector").define()) + .put(new Info("std::vector", "std::vector").pointerTypes("TensorIndexVector").define()) + .put(new Info("std::vector >").pointerTypes("TensorOptionalVector").define()) + .put(new Info("std::vector >").pointerTypes("OperatorOptionalVector").define()) + .put(new Info("std::vector >").pointerTypes("SharedFunctionPreVector").define()) + .put(new Info("const std::vector >", + "std::vector >").pointerTypes("FunctionPreHookVector").define()) + .put(new Info("const std::vector >", + "std::vector >").pointerTypes("FunctionPostHookVector").define()) + .put(new Info("const std::vector", "std::vector").pointerTypes("SavedVariableVector").define()) + .put(new Info("const std::vector", "std::vector").pointerTypes("DefVector").define()) + .put(new Info("const std::vector", "std::vector").pointerTypes("PropertyVector").define()) + .put(new Info("const std::vector", "std::vector").pointerTypes("InstructionVector").define()) + .put(new Info("const std::vector", "std::vector").pointerTypes("CompilationUnitVector").define()) + .put(new Info("const std::vector", "std::vector").pointerTypes("OptimizerParamGroupVector").define()) + .put(new Info("std::vector").pointerTypes("FunctionVector").define()) + .put(new Info("std::vector >").pointerTypes("GraphVector").define()) + .put(new Info("std::vector >").pointerTypes("OperatorVector").define()) + .put(new Info("std::vector >", "std::vector").pointerTypes("ResolverVector").define()) + .put(new Info("std::vector").pointerTypes("StackEntryVector").define()) + .put(new Info("std::vector", "std::vector").pointerTypes("ValueVector").define()) // Returned by inlineCallTo + .put(new Info("std::vector").pointerTypes("JitNodeVector").define()) + .put(new Info("std::vector").pointerTypes("ModuleVector").define()) + .put(new Info("std::vector::iterator").pointerTypes("ModuleVector.Iterator")) + .put(new Info("std::vector").pointerTypes("AnyModuleVector").define()) + .put(new Info("std::vector::iterator").pointerTypes("AnyModuleVector.Iterator")) + .put(new Info("std::vector >").pointerTypes("SharedModuleVector").define()) + .put(new Info("std::vector >::iterator").pointerTypes("SharedModuleVector.Iterator")) + .put(new Info("std::vector >").pointerTypes("SharedAnyModuleVector").define()) + .put(new Info("std::vector 
>::iterator").pointerTypes("SharedAnyModuleVector.Iterator")) + .put(new Info("std::vector >").pointerTypes("StringTensorVector").define()) + .put(new Info("std::vector >").pointerTypes("StringModuleVector").define()) + .put(new Info("std::vector >").pointerTypes("StringAnyModuleVector").define()) + .put(new Info("std::vector > >").pointerTypes("StringSharedModuleVector").define()) + .put(new Info("std::vector >", "torch::jit::FusionStrategy").pointerTypes("FusionStrategy").define()) + .put(new Info("std::vector").pointerTypes("SymIntVector").define()) + .put(new Info("std::vector >").pointerTypes("SharedSugaredValueVector").define()) + .put(new Info("const std::vector").pointerTypes("FunctionSchemaVector").define()) + ; + + + //// c10::ArrayRef + for (ArrayInfo t : new ArrayInfo[]{ + new ArrayInfo("Argument").elementTypes("c10::Argument"), + new ArrayInfo("ArgumentDef").elementTypes("c10::detail::infer_schema::ArgumentDef"), + new ArrayInfo("BFloat16") /*.itPointerType("ShortPointer") */.elementTypes("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::BFloat16>::t)"), + new ArrayInfo("Block").elementTypes("torch::jit::Block*").itPointerType("PointerPointer"), + new ArrayInfo("Bool").itPointerType("BoolPointer").elementTypes("bool", "decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Bool>::t)").elementValueType("boolean"), + new ArrayInfo("Byte").itPointerType("BytePointer").elementTypes("jbyte", "int8_t", "uint8_t").elementValueType("byte"), + new ArrayInfo("Dimname").otherCppNames("at::DimnameList").elementTypes("at::Dimname"), + new ArrayInfo("Double").itPointerType("DoublePointer").elementTypes("double"), + new ArrayInfo("DoubleComplex") /*.itPointertype("DoublePointer") */.elementTypes("c10::complex"), + new ArrayInfo("EnumNameValue").elementTypes("c10::EnumNameValue"), + new ArrayInfo("Float").itPointerType("FloatPointer").elementTypes("float").elementValueType("float"), + new ArrayInfo("FloatComplex") /*.itPointerType("FloatPointer") */.elementTypes("c10::complex"), + new ArrayInfo("FuturePtr").elementTypes("c10::intrusive_ptr"), + new ArrayInfo("Half") /*.itPointerType("ShortPointer") */.elementTypes("decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::Half>::t)"), + new ArrayInfo("IValue").elementTypes("c10::IValue", "const at::IValue"), + new ArrayInfo("Int") + .itPointerType("IntPointer") + .elementTypes("jint", "int", "int32_t", "uint32_t") + .elementValueType("int"), + new ArrayInfo("Tag").itPointerType("BytePointer").elementTypes("at::Tag"), + new ArrayInfo("Long") // Warning : c10::IntArrayRef is a Java LongArrayRef and not a Java IntArrayRef + .otherCppNames("c10::IntArrayRef", "torch::IntArrayRef", "at::IntArrayRef", "c10::OptionalArray", "c10::remove_symint::type") + .itPointerType("LongPointer") + .otherPointerTypes("@Cast({\"int64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long...") + .elementTypes("int64_t", "jlong") // Order is important, since ArrayRef and ArrayRef are incompatible, even though long == long long. And jlong is long long. 
+                .elementValueType("long"),
+            new ArrayInfo("LongOptional").elementTypes("c10::optional"),
+            new ArrayInfo("LongVector").elementTypes("std::vector"),
+            new ArrayInfo("NamedValue").elementTypes("torch::jit::NamedValue"),
+            new ArrayInfo("SavedVariable").elementTypes("torch::autograd::SavedVariable"),
+            new ArrayInfo("Scalar").elementTypes("at::Scalar"),
+            new ArrayInfo("ScalarType").itPointerType("@Cast(\"c10::ScalarType*\") BytePointer").elementTypes("c10::ScalarType", "at::ScalarType"),
+            new ArrayInfo("Short").itPointerType("ShortPointer").elementTypes("jshort", "int16_t", "uint16_t").elementValueType("short"),
+            new ArrayInfo("SizeT").itPointerType("SizeTPointer").elementTypes("size_t").elementValueType("long"),
+            new ArrayInfo("Stride").elementTypes("c10::Stride"),
+            new ArrayInfo("String").itPointerType("PointerPointer" /*"@Cast({\"\", \"std::string*\"}) @StdString BytePointer"*/).elementTypes("std::string"),
+            new ArrayInfo("SymInt").otherCppNames("c10::SymIntArrayRef").elementTypes("c10::SymInt"),
+            new ArrayInfo("SymNode").elementTypes("c10::SymNode", "c10::intrusive_ptr"),
+            new ArrayInfo("Symbol").elementTypes("c10::Symbol"),
+            new ArrayInfo("Tensor").otherCppNames("torch::TensorList", "at::ITensorListRef").elementTypes("torch::Tensor", "at::Tensor"), // Warning: not a TensorList (List)
+            new ArrayInfo("TensorArg").elementTypes("torch::TensorArg", "at::TensorArg"),
+            new ArrayInfo("TensorIndex").elementTypes("at::indexing::TensorIndex"),
+            new ArrayInfo("TensorOptional").elementTypes("c10::optional", "c10::optional", "c10::optional"),
+            new ArrayInfo("Type").itPointerType("Type.TypePtr").elementTypes("c10::TypePtr", "c10::Type::TypePtr"),
+            new ArrayInfo("Value").elementTypes("torch::jit::Value*")
+
+        }) {
+            t.mapArrayRef(infoMap);
+        }
+
+        // Special case for StringArrayRef: prevent using String or BytePointer and @StdString
+        // when arrays or std::string are expected.
+        // Is there any cleaner way to do this?
+        infoMap.put(new Info("c10::ArrayRef::begin()").javaText(
+            "public native @Const PointerPointer begin();"
+        )).put(new Info("c10::ArrayRef::end()").javaText(
+            "public native @Const PointerPointer end();"
+        )).put(new Info("c10::ArrayRef::cbegin()").javaText(
+            "public native @Const PointerPointer cbegin();"
+        )).put(new Info("c10::ArrayRef::cend()").javaText(
+            "public native @Const PointerPointer cend();"
+        )).put(new Info("c10::ArrayRef::data()").javaText(
+            "public native @Const PointerPointer data();"
+        )).put(new Info("c10::ArrayRef(const std::string*, size_t)").javaText(
+            "public StringArrayRef(PointerPointer data, long length) { super((Pointer)null); allocate(data, length); }\n" +
+            "private native void allocate(@Cast(\"const std::string*\") PointerPointer data, @Cast(\"size_t\") long length);"
+        )).put(new Info("c10::ArrayRef(const std::string*, const std::string*)").javaText(
+            "public StringArrayRef(PointerPointer begin, PointerPointer end) { super((Pointer)null); allocate(begin, end); }\n" +
+            "private native void allocate(@Cast(\"const std::string*\") PointerPointer begin, @Cast(\"const std::string*\") PointerPointer end);"
+        ));
+
+        // Special case for TagArrayRef: Tag is an enum and not a Pointer. Arrays are returned as IntPointer.
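+        // (Illustration of the intended Java usage, assuming size() is mapped as usual:
+        //   TagArrayRef tags = ...;             // obtained from some native call
+        //   for (long i = 0; i < tags.size(); i++) {
+        //       int tag = tags.begin().get(i);  // enum value read as a plain int
+        //   }
+        // )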
+        infoMap.put(new Info("c10::ArrayRef::begin()").javaText(
+            "public native @Const IntPointer begin();"
+        )).put(new Info("c10::ArrayRef::end()").javaText(
+            "public native @Const IntPointer end();"
+        )).put(new Info("c10::ArrayRef::cbegin()").javaText(
+            "public native @Const IntPointer cbegin();"
+        )).put(new Info("c10::ArrayRef::cend()").javaText(
+            "public native @Const IntPointer cend();"
+        )).put(new Info("c10::ArrayRef::data()").javaText(
+            "public native @Const IntPointer data();"
+        )).put(new Info("c10::ArrayRef(const at::Tag*, size_t)").javaText(
+            "public TagArrayRef(IntPointer data, long length) { super((Pointer)null); allocate(data, length); }\n" +
+            "private native void allocate(@Cast(\"const at::Tag*\") IntPointer data, @Cast(\"size_t\") long length);"
+        )).put(new Info("c10::ArrayRef(const at::Tag*, const at::Tag*)").javaText(
+            "public TagArrayRef(IntPointer begin, IntPointer end) { super((Pointer)null); allocate(begin, end); }\n" +
+            "private native void allocate(@Cast(\"const at::Tag*\") IntPointer begin, @Cast(\"const at::Tag*\") IntPointer end);"
+        )).put(new Info("c10::ArrayRef::vec()").skip() // Is there any way to make this work?
+        );
+
+
+        //// c10::List
+        for (ArrayInfo ai : new ArrayInfo[]{
+            new ArrayInfo("DoubleComplex").elementTypes("c10::complex"),
+            new ArrayInfo("Boolean").elementTypes("bool").elementValueType("boolean"),
+            new ArrayInfo("Long").elementTypes("int64_t").elementValueType("long"),
+            new ArrayInfo("Double").elementTypes("double").elementValueType("double"),
+            new ArrayInfo("TensorOptional").elementTypes("c10::optional"),
+            new ArrayInfo("Tensor").elementTypes("at::Tensor"),
+            new ArrayInfo("FuturePtr").elementTypes("c10::intrusive_ptr"),
+            new ArrayInfo("Generic").elementTypes("c10::IValue").itPointerType("IValue").elementValueType("@ByVal IValue"),
+        }) {
+            ai.mapList(infoMap);
+        }
+        // swap is a friend templated function. Parser fails to perform template substitution in this case.
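+        // (The C++ declaration is roughly of the form
+        //   friend void swap(ListElementReference&& lhs, T& rhs);
+        // where T is the element type of the enclosing c10::List, which the Parser cannot
+        // substitute, hence the skip below.)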
+ infoMap.put(new Info("c10::impl::ListElementReference::swap").skip()); + // friendly global setting lost + infoMap.put(new Info("impl::ptr_to_first_element(const c10::List&)").javaNames("ptr_to_first_element").annotations("@Name(\"c10::impl::ptr_to_first_element\")").friendly()); + + + //// Small Vectors + /* Warning: two classes "Node": + * torch::autograd::Node, defined in autograd/function.h, referenced in Doxygen, TORCH_API + * torch::lazy::Node, defined in torch/csrc/lazy/core/ir.h, TORCH_API, not mapped + */ + infoMap.put(new Info("torch::autograd::Node").pointerTypes("Node").purify()); // Since Node is defined after SmallVector.h + infoMap.put(new Info("c10::SymInt").pointerTypes("SymInt")); // Since SymInt is defined after SmallVector.h + for (String[] t : new String[][]{ + {"SymInt", "SymInt", "@ByVal SymInt", "c10::SymInt", "at::kDimVectorStaticSize", "at::SymDimVector", "SymDimVector"}, + {"Long", "LongPointer", "long", "int64_t", "at::kDimVectorStaticSize", "at::DimVector", "DimVector"}, + {"Node", "Node", "@ByPtr Node", "torch::autograd::Node*", "4", null, "SmallNodeVector"}, + {"TreeRef", "TreeRef", "@ByVal TreeRef", "c10::intrusive_ptr", "4", null, "TreeList"} + + }) { + // Assume all have SmallVectorSizeType == uint32_t + infoMap + .put(new Info(template("c10::SmallVectorBase", template("c10::SmallVectorSizeType", t[3]))).pointerTypes("IntSizedSmallVectorBase")) + .put(new Info(template("c10::SmallVectorTemplateCommon", t[3])).pointerTypes(t[0] + "SmallVectorCommon")) + .put(new Info(template("c10::SmallVectorTemplateCommon", t[3]) + "::size_type", + template("c10::SmallVectorImpl", t[3]) + "::size_type").valueTypes("long")) + .put(new Info(template("c10::SmallVectorTemplateBase", t[3])).pointerTypes(t[0] + "SmallVectorBase")) + .put(new Info(template("c10::SmallVectorImpl", t[3])).pointerTypes(t[0] + "SmallVectorImpl")) + .put(new Info(template("c10::SmallVectorImpl", t[3]) + "::iterator", + template("c10::SmallVectorImpl", t[3]) + "::const_iterator", + template("c10::SmallVectorTemplateCommon", t[3]) + "::iterator", + template("c10::SmallVectorTemplateCommon", t[3]) + "::pointer" + ) + .cast().pointerTypes(t[1])) + .put(new Info( + template("c10::SmallVector", t[3], t[4]) + "(" + template("c10::SmallVectorImpl", t[3]) + "&&)", + template("c10::SmallVector", t[3], t[4]) + "::operator =(" + template("c10::SmallVectorImpl", t[3]) + "&&)") + .skip()) + .put(new Info( + template("c10::SmallVectorTemplateCommon", t[3]) + "::reference", + template("c10::SmallVectorTemplateCommon", t[3]) + "::const_reference") + .pointerTypes(t[1]).valueTypes(t[2])) + .put(new Info( + template("c10::SmallVectorTemplateCommon", t[3]) + "::reverse_iterator", + template("c10::SmallVectorTemplateCommon", t[3]) + "::const_reverse_iterator") + .skip()) + .put(new Info(template("c10::SmallVectorImpl", t[3]) + "::ValueParamT") + .valueTypes(t[2])) + ; + if (t[5] == null) { + infoMap.put(new Info(template("c10::SmallVector", t[3], t[4]), template("at::SmallVector", t[3], t[4])).pointerTypes(t[6])); + } else { + infoMap.put(new Info(template("c10::SmallVector", t[3], t[4]), template("at::SmallVector", t[3], t[4]), t[5]).pointerTypes(t[6])); + } + } + + + //// std::map + infoMap + .put(new Info("std::map").pointerTypes("StringStringMap").define()) + .put(new Info("std::map").pointerTypes("StringIntMap").define()) + .put(new Info("std::map").pointerTypes("StringLongMap").define()) + .put(new Info("std::map").pointerTypes("StringTensorMap").define()) + ; + + + //// std::unordered_set + infoMap + .put(new 
Info("std::unordered_set").pointerTypes("StringSet").define()) + .put(new Info("std::unordered_set").pointerTypes("HashAliasedIValues").define()) + .put(new Info("std::unordered_set").pointerTypes("SymbolSet").define()) + .put(new Info("std::unordered_set", "std::unordered_set").pointerTypes("TensorImplSet").define()) + .put(new Info("std::unordered_set >").pointerTypes("RecordScopeSet").define()) + .put(new Info("std::unordered_set").pointerTypes("NodeSet").define()) + .put(new Info("std::unordered_set").pointerTypes("StreamSet").define()) + .put(new Info("std::unordered_set").pointerTypes("RecordScopeSet").define()) + .put(new Info("std::set").pointerTypes("ActivityTypeSet").define()) + ; + + + //// std::unordered_map + infoMap + .put(new Info("std::unordered_map").pointerTypes("NodeIntMap").define()) + .put(new Info("std::unordered_map").pointerTypes("HashAliasedIValueMap").define()) + .put(new Info("std::unordered_map").pointerTypes("LongStringMap").define()) + .put(new Info("std::unordered_map").pointerTypes("StringBoolMap").define()) + .put(new Info("std::unordered_map").pointerTypes("StringSizeTMap").define()) + .put(new Info("std::unordered_map").pointerTypes("ExtraFilesMap").define()) + .put(new Info("std::unordered_map").pointerTypes("TypeEnv").define()) + .put(new Info("std::unordered_map", "std::unordered_map").pointerTypes("StringIValueMap").define()) + .put(new Info("std::unordered_map >").pointerTypes("StringFunctionMap").define()) + .put(new Info("std::unordered_map").pointerTypes("StringValueMap").define()) + .put(new Info("std::unordered_map >").pointerTypes("StringLongStringMapMap").define()) + .put(new Info("std::unordered_map").pointerTypes("ValueValueMap").define()) + .put(new Info("std::unordered_map").pointerTypes("ArgumentSpecExecutionPlanMap").define()) + .put(new Info("std::unordered_map").pointerTypes("TreeRefStringMap").define()) + ; + + + //// std::atomic + infoMap + .put(new Info("std::atomic_bool", "std::atomic").cast().valueTypes("boolean").pointerTypes("BoolPointer")) + .put(new Info("std::atomic_uint64_t", "std::atomic", "std::atomic", "std::atomic_size_t", "std::atomic").cast().valueTypes("long").pointerTypes("LongPointer")) + .put(new Info("std::atomic").cast().pointerTypes("DeviceGuardImplInterface")) + ; + + + //// std::tuple + infoMap + .put(new Info("std::tuple").pointerTypes("T_IntInt_T").define()) + .put(new Info("std::tuple").pointerTypes("T_LongLong_T").define()) + .put(new Info("std::tuple").pointerTypes("T_DoubleLong_T").define()) + //.put(new Info("std::tuple").pointerTypes("TensorTuple").define()) + .put(new Info("std::tuple", "std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensor_T").define()) + .put(new Info("std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensor_T").define()) + .put(new Info("std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensor_T").define()) + .put(new Info("std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensorTensor_T").define()) + .put(new Info("std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensorTensorTensor_T").define()) + .put(new Info("std::tuple", "std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensorTensorTensorTensor_T").define()) + .put(new Info("std::tuple >", "std::tuple >").pointerTypes("T_TensorTensorTensorTensorVector_T").define()) + .put(new Info("std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensorLong_T").define()) + .put(new Info("std::tuple", 
"std::tuple").pointerTypes("T_TensorTensorDoubleLong_T").define()) + .put(new Info("std::tuple >").pointerTypes("T_TensorT_TensorTensor_T_T").define()) + .put(new Info("std::tuple,c10::MaybeOwned >") + .pointerTypes("T_TensorMaybeOwnedTensorMaybeOwned_T").define()) + .put(new Info("std::tuple,c10::MaybeOwned,c10::MaybeOwned >") + .pointerTypes("T_TensorMaybeOwnedTensorMaybeOwnedTensorMaybeOwned_T").define()) + .put(new Info("std::tuple").purify().pointerTypes("T_PackedSequenceTensor_T").define()) + .put(new Info("std::tuple >").purify().pointerTypes("T_PackedSequenceT_TensorTensor_T_T").define()) + .put(new Info("std::tuple", + "std::tuple", + "std::tuple", + "std::tuple", + "std::tuple", + "std::tuple" + ).cast().pointerTypes("PointerPointer")) + .put(new Info("std::tuple").pointerTypes("T_StringSizeTSizeT_T").define()) + .put(new Info("std::tuple").pointerTypes("T_StringLong_T").define()) + .put(new Info("std::tuple >", "std::tuple >").pointerTypes("T_TensorTensorVector_T").define()) + .put(new Info("std::tuple,torch::Tensor>", "std::tuple,at::Tensor>").pointerTypes("T_TensorVectorTensor_T").define()) + .put(new Info( + "std::tuple,std::vector,std::vector,std::vector,std::vector >", + "std::tuple,std::vector,std::vector,std::vector,std::vector >") + .pointerTypes("T_TensorVectorTensorVectorTensorVectorTensorVectorTensorVector_T").define()) + .put(new Info("std::tuple,std::vector >", "std::tuple,std::vector >").pointerTypes("T_TensorTensorVectorTensorVector_T").define()) + .put(new Info("std::tuple", "std::tuple").pointerTypes("T_TensorTensorLongLongTensor_T").define()) + .put(new Info("std::tuple", "std::tuple").pointerTypes("T_TensorTensorTensorTensorsLongLongLongLongTensor_T").define()) + .put(new Info("const std::tuple", "std::tuple").pointerTypes("T_DataPtrSizeT_T").define()) + .put(new Info("std::tuple", "std::pair").pointerTypes("T_TypePtrLong_T").define()) // Parse this pair as tuple because Parser doesn't generate valid code for optional + ; + + + //// Other std stuff + infoMap + .put(new Info("std::type_index").pointerTypes("@Cast(\"std::type_index*\") Pointer")) + .put(new Info("std::deque").pointerTypes("TensorDeque").define()) + .put(new Info("std::bitset<64>", "std::bitset", "std::bitset", + "std::bitset", "std::bitset").valueTypes("long")) + .put(new Info("std::basic_string").annotations("@StdString").valueTypes("BytePointer").pointerTypes("@Cast({\"char*\", \"std::string\"}) BytePointer")) + ; + + + //// Jit List + for (String[] t : new String[][]{ + {"ExprList", "torch::jit::Expr", "Expr"}, + {"StmtList", "torch::jit::Stmt", "Stmt"}, + {"WithItemList", "torch::jit::WithItem", "WithItem"}, + {"PropertyList", "torch::jit::Property", "Property"}, + {"AssignList", "torch::jit::Assign", "Assign"}, + {"ParamList", "torch::jit::Param", "Param"}, + {"IdentList", "torch::jit::Ident", "Ident"}, + {"AttributeList", "torch::jit::Attribute", "Attribute"}, + }) { + infoMap.put(new Info(template("torch::jit::List", t[1])).pointerTypes(t[0])) + .put(new Info(template("torch::jit::ListIterator", t[1])).pointerTypes(t[0] + "Iterator")) + .put(new Info(template("torch::jit::List", t[1]) + "::map").skip()) // Could map if needed + ; + } + infoMap.put(new Info("torch::jit::TreeList::const_iterator").cast().pointerTypes("TreeRef")); + + + /* Not parsed anymore + List binaryOps = Arrays.asList("Add", "Sub", "Div", "Max", "Min", "Mul", "Mod", "Xor", "And", "Or", "Rshift", "Lshift"); + List exprOps = new ArrayList<>(); + exprOps.addAll(Arrays.asList("CharImm", "FloatImm", "BitCast", "Intrinsics", 
"Broadcast", "Cast")); + exprOps.addAll(binaryOps); + List bitwiseOps = Arrays.asList("Xor", "And", "Or", "Rshift", "Lshift"); + + for (String op : binaryOps) + infoMap.put(new Info("torch::jit::tensorexpr::BinaryOpNode").pointerTypes("BinaryOpNode" + op)); + for (String op : exprOps) + infoMap.put(new Info("torch::jit::tensorexpr::ExprNode").pointerTypes("ExprNode" + op)); + for (String op : bitwiseOps) + infoMap.put(new Info("torch::jit::tensorexpr::BitwiseOpNode").pointerTypes("BitwiseOpNode" + op)); + */ + + + //// c10 Dict + infoMap + .put(new Info("c10::Dict").purify().pointerTypes("GenericDict")) + .put(new Info("c10::impl::DictEntryRef").pointerTypes("GenericDictEntryRef")) + .put(new Info("c10::impl::DictIterator", + "c10::Dict::iterator").purify().pointerTypes("GenericDictIterator").friendly()) + .put(new Info("c10::Dict").pointerTypes("StringGenericListDict")) + .put(new Info("c10::Dict(c10::TypePtr, c10::TypePtr)").skip()) + .put(new Info( + "c10::impl::DictIterator::operator -(const c10::impl::DictIterator&, const c10::impl::DictIterator&)", + "c10::impl::DictIterator::operator -").skip()) // Don't know how to map :difference_type + + /* Following operators throw a template error "no match", even in C++. */ + .put(new Info("c10::Dict::iterator::operator <(const c10::Dict::iterator&, const c10::Dict::iterator&)").skip()) + .put(new Info("c10::impl::DictIterator::operator <(const c10::impl::DictIterator&, const c10::impl::DictIterator&)").skip()) + .put(new Info("c10::Dict::iterator::operator <=(const c10::Dict::iterator&, const c10::Dict::iterator&)").skip()) + .put(new Info("c10::impl::DictIterator::operator <=(const c10::impl::DictIterator&, const c10::impl::DictIterator&)").skip()) + .put(new Info("c10::Dict::iterator::operator >=(const c10::Dict::iterator&, const c10::Dict::iterator&)").skip()) + .put(new Info("c10::impl::DictIterator::operator >=(const c10::impl::DictIterator&, const c10::impl::DictIterator&)").skip()) + .put(new Info("c10::Dict::iterator::operator >(const c10::Dict::iterator&, const c10::Dict::iterator&)").skip()) + .put(new Info("c10::impl::DictIterator::operator >(const c10::impl::DictIterator&, const c10::impl::DictIterator&)").skip()) + ; + + + //// torch::OrderedDict + for (String[] o: new String[][] { + { "std::string", "torch::Tensor", "StringTensor" }, + { "std::string", "torch::nn::Module", "StringModule" }, + { "std::string", "torch::nn::AnyModule", "StringAnyModule" }, + { "std::string", "std::shared_ptr", "StringSharedModule" } + }) { + infoMap + .put(new Info(template("torch::OrderedDict", o[0], o[1])).pointerTypes(o[2] + "Dict")) + .put(new Info(template("torch::OrderedDict::Item", o[0], o[1]), template("torch::OrderedDict", o[0], o[1]) + "::Item").pointerTypes(o[2] + "DictItem")) + // Adding const since items don't have no-arg constructors. See PR #664. + .put(new Info("const " + template("std::vector", template("torch::OrderedDict", o[0], o[1]) + "::Item")).pointerTypes(o[2] + "DictItemVector").define()) + ; + } + + // What is the use for this ? + //.put(new Info("torch::OrderedDict") + // .valueTypes("@Cast({\"\", \"torch::OrderedDict&&\"}) @StdMove StringAnyModuleDict")) + + //// std::pair + infoMap + // Parser doesn't generate iterators for vector of pairs, so function returning such iterators, like ParameterListImpl::begin() + // must be mapped to returning item instead. Issue #673. Change when issue resolved. 
+ .put(new Info("std::pair", "std::pair").cast().pointerTypes("StringTensorPair").define()) + .put(new Info("std::pair").pointerTypes("StringModulePair").define()) + .put(new Info("std::pair").pointerTypes("StringAnyModulePair").define()) + .put(new Info("std::pair >").pointerTypes("StringSharedModulePair").define()) + .put(new Info("std::pair").pointerTypes("RecordFunctionHandleIntPair").define()) + .put(new Info("std::pair").pointerTypes("SizeTMatchedSchemaPair").define()) + ; + + //// Intrusive pointers + /* We cannot define an adapter working like SharedPtrAdapter since there is no public constructor of + intrusive_ptr taking a T*. */ + for (PointerInfo pi : new PointerInfo[]{ + new PointerInfo("c10::ivalue::Tuple"), + new PointerInfo("c10::ivalue::Future", "at::ivalue::Future"), + new PointerInfo("c10::ivalue::ConstantString"), + new PointerInfo("c10::GeneratorImpl"), + new PointerInfo("at::Quantizer"), + new PointerInfo("c10::ivalue::Await"), + new PointerInfo("c10::RRefInterface"), + new PointerInfo("c10::ivalue::PyObjectHolder"), + new PointerInfo("c10::ivalue::EnumHolder"), + new PointerInfo("c10::TensorImpl"), + new PointerInfo("c10::TensorImpl,c10::UndefinedTensorImpl").javaBaseName("TensorImpl"), + new PointerInfo("torch::jit::Tree").javaName("TreeRef"), + new PointerInfo("c10::StorageImpl", "c10::StorageImpl,NullType"), + new PointerInfo("c10::SymNodeImpl").javaName("SymNode") + }) { + String[] cppNames = new String[pi.argumentNames.length + pi.otherCppNames.length]; + int i = 0; + for (String n : pi.argumentNames) { + String ipn = template("c10::intrusive_ptr", n); + cppNames[i++] = ipn; + // Skipping constructor taking a unique_ptr + infoMap.put(new Info(ipn + "(" + n + "*)").skip()); + /* If we need to map a unique_ptr with this type, we need to disambiguate constructor + with something like: + infoMap.put(new Info(ipn + "(" + upn + ")").javaText( + "public " + pi.javaName + "(" + xxx + " rhs) { super((Pointer)null); allocate(rhs); }\n" + + "@NoException(true) private native void allocate(@Cast({\"\", \"" + upn + "\"}) @UniquePtr " + xxx + " rhs);")); + */ + } + for (String n : pi.otherCppNames) + cppNames[i++] = n; + infoMap.put(new Info(cppNames).pointerTypes(pi.javaName == null ? 
(pi.javaBaseName + "Ptr") : pi.javaName)); + } + + + //// Classes that Parser cannot detect as virtual + infoMap.put(new Info("c10::Error", "c10::IndexError", "c10::LinAlgError", "c10::ValueError", "c10::TypeError", "c10::NotImplementedError", "c10::EnforceFiniteError", "c10::OutOfMemoryError", + "c10::OnnxfiBackendSystemError", "c10::DistBackendError", "c10::SharedType", "c10::StrongTypePtr", + "c10::WeakTypePtr", "torch::autograd::CppFunctionPreHook", "torch::autograd::DifferentiableViewMeta", + "torch::autograd::TraceableFunction", "torch::jit::Instruction", "torch::jit::Method", "torch::jit::ModuleInstanceInfo", + "torch::jit::Object::Property", "torch::jit::OperatorSet", "torch::jit::SourceRangePickler", "torch::jit::Unpickler", + "torch::jit::Operator", "c10::CuDNNError").purify()); + + + /// Classes skipped for various non-investigated reasons + infoMap + .put(new Info(/*"c10::intrusive_ptr", "c10::weak_intrusive_ptr", */"c10::guts::is_fundamental", + "c10::detail::CaptureKernelCall", "c10::detail::DictImpl", "c10::detail::MultiDispatchKeySet", "c10::ExclusivelyOwnedTraits", "c10::FunctionSchema::dump", + "c10::domain_prefix", "c10::C10FlagsRegistry", "c10::enforce_detail::EnforceFailMessage", "c10::impl::build_feature_required_feature_not_available", + "c10::detail::getMaybeFakeTypePtr_", "c10::complex_literals::operator \"\"_if", "c10::complex_literals::operator \"\"_id", + "decltype(::c10::impl::ScalarTypeToCPPType<::c10::ScalarType::ComplexHalf>::t)", "c10::BoxedKernel", "c10::ExtraMeta", "c10::remove_symint", + "c10::InefficientStdFunctionContext", "c10::DataPtr::move_context", "c10::detail::UniqueVoidPtr::move_context", "QuantizerPtr", "c10::IValue::toModule", "c10::toBackendComponent", + "c10::optional", "c10::asIntArrayRefSlow", "c10::standardizeVectorForUnion", + "c10::impl::ExcludeDispatchKeyGuard", "c10::impl::ScalarTypeToCPPType", "c10::impl::AnnotatedKernel", "c10::impl::OperatorEntry", + "c10::StorageImpl(c10::StorageImpl)", "c10::StorageImpl::operator =", + "c10::TensorImpl(c10::TensorImpl)", "c10::TensorImpl::operator =", + "caffe2::Blob(caffe2::Blob)", "caffe2::Blob::operator =", "c10::detail::infer_schema::bool_t", + "torch::serialize::InputArchive(torch::serialize::InputArchive)", "torch::serialize::InputArchive::operator =", + "torch::serialize::OutputArchive(torch::serialize::OutputArchive)", "torch::serialize::OutputArchive::operator =", + "at::_test_serialization_subcmul", "at::_test_optional_intlist", "at::_test_optional_filled_intlist", + "at::_test_optional_floatlist", "at::_test_string_default", "at::_test_ambiguous_defaults", + "at::TensorBase::expect_contiguous", // conflict with returning type of "Tensor::expect_contiguous" + "torch::Tensor::print", "at::borrow_from_optional_tensor", + "at::MaterializedITensorListRef", "at::impl::check_names_valid_for", "at::internal::launch_no_thread_state", + "at::checkSameNumel", "at::check_names_valid_for", "at::default_names", "at::get_device", "at::detail::scalar_fill", + "at::namedinference::compute_diagonal_outnames", "torch::Tensor::packed_accessor", "torch::optim::serialize", "torch::none_of", + "torch::CountTensors", "torch::CountVariables", "torch::autograd::ExtractVariables", "torch::autograd::detail::MakeNextFunctionList", + "torch::autograd::AutogradMeta::hooks_", "torch::autograd::AutogradMeta::cpp_hooks_list_", + "torch::autograd::VariableType::unpack", "torch::autograd::VariableType::unpack_opt", "torch::jit::parseSchemaOrName", + "torch::jit::trace", 
"torch::jit::tracer::TracingState::lookup_var_name_fn", "torch::jit::tracer::ArgumentStash", + "torch::jit::constant_not_supported_error", "torch::jit::ObjectAttributeError", "torch::jit::utils::get_module_info", + "torch::jit::operator <<(std::ostream&, torch::jit::Instruction)", "torch::jit::toString(torch::jit::OpCode)", + "torch::jit::PropertyPropBase::processLoop", "torch::jit::PropertyPropBase::processIf", "torch::jit::PropertyPropBase::propagateBlock", + "torch::jit::getMobileInterfaceCallExport", "torch::jit::OperatorSet::getOps", "torch::jit::SourceView::findSourceRangeThatGenerated", + "at::namedinference::propagate_names_if_present_and_nonempty", "torch::jit::_load_jit_module_from_flatbuffer_bytes", "torch::jit::_save_jit_module_to", + "torch::jit::checkHasValidSetGetState", "torch::jit::getTypeTags", "torch::jit::setTypeTags", "torch::jit::getStorageKey", + "torch::jit::getUnresolvedClassAttributes", "torch::jit::isOpSupportedInMobile", "torch::jit::restoreAccurateTypeTags", + "torch::jit::detail::getDifferentiableGraphOpExecutor", "torch::jit::detail::getGradExecutor", "torch::jit::Graph::createPythonOp", + "torch::jit::Graph::createDifferentiableSubgraph", "torch::jit::NamedValue::type", "torch::jit::ProfileOp", "torch::jit::Value::isValidName", + "torch::jit::EqualType::operator ()", "torch::jit::HashType::operator ()", "torch::jit::InterpreterContinuation::operator ()", + "torch::jit::Object(c10::QualifiedName, torch::jit::CompilationUnit*, bool)", "torch::jit::Source::findSourceRangeThatGenerated", + "torch::jit::SourceRangeDeserializer::deserialize", "torch::jit::SourceRangePickler::pickle", "torch::jit::Pickler::pushEmptyDict", + "torch::jit::PrintDepsTable::add", "torch::jit::printerHasSpecialCaseFor", "ONNX_NAMESPACE::ModelProto", "torch::jit::export_onnx", + "torch::jit::Function::call", "torch::jit::GraphFunction::call", "torch::jit::GraphFunction::function_creator", "torch::jit::getOptionsFromGlobal", + "torch::jit::serialize_model_proto_to_string", "torch::onnx::IR_VERSION", "torch::onnx::PRODUCER_VERSION", + "TORCH_DISALLOW_TEMPORARIES", "TORCH_DISALLOW_TEMPORARIES_IMPL", // Issue #674 + "DEFINE_CASTING(TAG, ...)", "TORCH_ILISTREF_FORALL_TAGS", + "torch::autograd::GraphTask::ExecInfo::Capture::DO_NOT_USE_DEPRECATED_get_capture_hooks", + "torch::autograd::GraphTask::ExecInfo::Capture::DO_NOT_USE_DEPRECATED_register_capture_hook", + "c10::detail::IListRefTagImplBase", + "c10::detail::IListRefTagImpl", + "c10::IValue::TagType", + "std::conjunction<>", + "std::disjunction<>", + "std::numeric_limits", + "torch::profiler::impl::ApproximateClockToUnixTimeConverter", + "basic_string_view::npos", + "c10::impl::boxed_size_one", + "torch::detail::check_not_lvalue_references", + "c10::guts::false_higher_t" + ).skip()); + + + //// Complex + infoMap + .put(new Info("c10::complex").pointerTypes("DoubleComplex")) + .put(new Info("c10::complex").pointerTypes("FloatComplex")) + .put(new Info("c10::complex").pointerTypes("HalfComplex")) + .put(new Info("c10::complex::real", "c10::complex::imag", + "c10::complex::real", "c10::complex::imag", + "c10::complex::real", "c10::complex::imag").annotations("@org.bytedeco.javacpp.annotation.Function")) + ; + + + //// TypeKind + infoMap + .put(new Info("c10::EnumerationType").pointerTypes("LayoutEnumerationType")) + .put(new Info("c10::EnumerationType").pointerTypes("ScalarTypeEnumerationType")) + .put(new Info("c10::EnumerationType").pointerTypes("MemoryFormattEnumerationType")) + .put(new 
Info("c10::SingleElementType").pointerTypes("AwaitSingleElementType")) + .put(new Info("c10::SingleElementType").pointerTypes("ListSingleElementType")) + .put(new Info("c10::SingleElementType").pointerTypes("RRefSingleElementType")) + .put(new Info("c10::SingleElementType").pointerTypes("FutureSingleElementType")) + .put(new Info("c10::SingleElementType").pointerTypes("OptionalSingleElementType")) + .put(new Info("c10::SingleElementType").pointerTypes("AwaitSingleElementType")) + ; + + + //// Jit attributes + infoMap + .put(new Info("torch::jit::ComplexAttr::ConstructorType", "torch::jit::ComplexAttr::ValueType").cast().pointerTypes("DoublePointer")) + .put(new Info("torch::jit::ComplexValsAttr::ConstructorType", "torch::jit::ComplexValsAttr::ValueType").cast().pointerTypes("Pointer")) + .put(new Info("torch::jit::FloatAttr::ConstructorType", "torch::jit::FloatAttr::ValueType").cast().valueTypes("double").pointerTypes("DoublePointer")) + .put(new Info("torch::jit::FloatsAttr::ConstructorType", "torch::jit::FloatsAttr::ValueType").cast().pointerTypes("DoubleVector")) + .put(new Info("torch::jit::IntAttr::ConstructorType", "torch::jit::IntAttr::ValueType").cast().valueTypes("long").pointerTypes("LongPointer")) + .put(new Info("torch::jit::IntsAttr::ConstructorType", "torch::jit::IntsAttr::ValueType").cast().pointerTypes("LongVector")) + .put(new Info("torch::jit::StringAttr::ConstructorType", "torch::jit::StringAttr::ValueType").annotations("@StdString").pointerTypes("BytePointer")) + .put(new Info("torch::jit::StringsAttr::ConstructorType", "torch::jit::StringsAttr::ValueType").cast().pointerTypes("StringVector")) + .put(new Info("torch::jit::TensorAttr::ConstructorType", "torch::jit::TensorAttr::ValueType").cast().pointerTypes("Tensor")) + .put(new Info("torch::jit::TensorsAttr::ConstructorType", "torch::jit::TensorsAttr::ValueType").cast().pointerTypes("TensorVector")) + .put(new Info("torch::jit::TypesAttr::ConstructorType", "torch::jit::TypesAttr::ValueType").cast().pointerTypes("TypeVector")) + .put(new Info("torch::jit::IValueAttr::ConstructorType", "torch::jit::IValueAttr::ValueType").cast().pointerTypes("IValue")) + ; + + + //// Jit iterators + for (String[] t : new String[][]{ + {"Module", "JitModule", "torch::jit::Module"}, + {"Parameter", "Tensor", "torch::Tensor"}, + {"Attribute", "IValue", "c10::IValue"}, + {"Buffer", "Tensor", "torch::Tensor"} + }) { + infoMap.put(new Info( + "torch::jit::slot_list_impl", + "torch::jit::" + t[0].toLowerCase() + "_list").pointerTypes(t[0].toLowerCase() + "_list")) + .put(new Info("torch::jit::slot_iterator_impl").pointerTypes(t[0].toLowerCase() + "_iterator")) + .put(new Info("torch::jit::slot_iterator_impl::value_type").pointerTypes(t[1])) + .put(new Info("torch::jit::Named<" + t[2] + ">").pointerTypes("Named" + t[1])) + .put(new Info("torch::jit::detail::NamedPolicy").pointerTypes("Named" + t[1] + "Policy")) + .put(new Info( + "torch::jit::slot_list_impl >", + "torch::jit::named_" + t[0].toLowerCase() + "_list").pointerTypes("named_" + t[0].toLowerCase() + "_list")) + .put(new Info("torch::jit::slot_iterator_impl >").pointerTypes("named_" + t[0].toLowerCase() + "_iterator")) + .put(new Info("torch::jit::slot_iterator_impl >::value_type").pointerTypes("Named" + t[1])) + ; + } + + infoMap + .put(new Info("torch::jit::tracer::warn_fn_type", "warn_fn_type").cast().pointerTypes("warn_fn_type")) + .put(new Info("torch::jit::Maybe").pointerTypes("DefMaybe")) + .put(new Info("torch::jit::Maybe").pointerTypes("ExprMaybe")) + .put(new 
Info("torch::jit::Maybe").pointerTypes("VarMaybe")) + .put(new Info("torch::jit::Maybe >").pointerTypes("PropertyListMaybe")) + .put(new Info("torch::jit::Maybe >").pointerTypes("AssignListMaybe")) + .put(new Info( + "torch::jit::Compound::map", + "torch::jit::Tree::map", + "torch::jit::Maybe::map", + "torch::jit::Maybe::map", + "torch::jit::Maybe::map", + "torch::jit::Maybe >::map", + "torch::jit::Maybe >::map").skip()) + .put(new Info("torch::jit::Wrap").pointerTypes("BlockWrap")) + .put(new Info("torch::jit::Wrap").pointerTypes("JitNodeWrap")) + .put(new Info("torch::jit::Wrap").pointerTypes("ValueWrap")); + + + //// Datasets String VirtualChunkDataReader = "JavaCPP_torch_0003a_0003adata_0003a_0003adatasets_0003a_0003aChunkDataReader_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_0002cstd_0003a_0003avector_0003ctorch_0003a_0003adata_0003a_0003aExample_0003c_0003e_00020_0003e_00020_0003e"; infoMap.put(new Info("std::vector >", // "UnwrappedBatchType", - "std::vector >::ExampleType>").pointerTypes("ExampleVector").define()) + "std::vector >::ExampleType>").pointerTypes("ExampleVector").define()) .put(new Info("std::vector >").pointerTypes("TensorExampleVector").define()) .put(new Info("c10::optional > >", "c10::optional<" + VirtualChunkDataReader + "::BatchType>", - "torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::BatchType") - .pointerTypes("ExampleVectorOptional").define()) + "torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::BatchType") + .pointerTypes("ExampleVectorOptional").define()) .put(new Info("torch::data::Example", "torch::data::Example<>").pointerTypes("Example")) .put(new Info("c10::optional >", "c10::optional >").pointerTypes("ExampleOptional").define()) .put(new Info("torch::data::Example").pointerTypes("TensorExample")) .put(new Info("torch::data::Example::Example").javaText( - "public TensorExample(@ByVal Tensor data) { super((Pointer)null); allocate(data); }\n" - + "private native void allocate(@ByVal Tensor data);\n")) + "public TensorExample(@ByVal Tensor data) { super((Pointer)null); allocate(data); }\n" + + "private native void allocate(@ByVal Tensor data);\n")) .put(new Info("torch::data::Example::target").skip()) // .put(new Info("torch::data::detail::SentinelIterator > >").pointerTypes("ExampleSentinelIterator")) // .put(new Info("torch::data::detail::ValidIterator > >").pointerTypes("ExampleValidIterator")) // .put(new Info("torch::data::detail::IteratorImpl > >").pointerTypes("ExampleIteratorImpl")) .put(new Info("torch::data::Iterator >").purify().pointerTypes("ExampleIterator")) - .put(new Info("torch::data::Iterator > >").purify().pointerTypes("ExampleVectorIterator")) + //.put(new Info("torch::data::Iterator > >").purify().pointerTypes("ExampleVectorIterator")) .put(new Info("torch::data::Iterator > > >").purify().pointerTypes("ExampleVectorOptionalIterator")) .put(new Info("torch::data::samplers::Sampler >", "torch::data::samplers::Sampler<>").pointerTypes("Sampler")) .put(new Info("torch::data::samplers::Sampler").pointerTypes("BatchSizeSampler")) @@ -2623,162 +1236,106 @@ public void map(InfoMap infoMap) { .put(new Info("torch::data::samplers::DistributedSampler >", "torch::data::samplers::DistributedSampler<>").purify().pointerTypes("DistributedSampler")) .put(new Info("c10::optional").pointerTypes("BatchSizeOptional").define()) .put(new 
Info("torch::data::transforms::BatchTransform >, torch::data::Example<> >", - "torch::data::transforms::Collation >").pointerTypes("ExampleCollation")) + "torch::data::transforms::Collation >").pointerTypes("ExampleCollation")) .put(new Info("torch::data::transforms::Stack >").pointerTypes("ExampleStack")) + .put(new Info("c10::optional >").pointerTypes("WeakStorageVectorOptional").define()) + .put(new Info("const std::vector", "std::vector").pointerTypes("WeakStorageVector").define()) + .put(new Info("std::vector").pointerTypes("CaptureVector")) + .put(new Info("torch::data::datasets::ChunkDataReader,std::vector > >", VirtualChunkDataReader).pointerTypes("ChunkDataReader").virtualize()) .put(new Info("torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>").pointerTypes("ChunkDataset")) .put(new Info("torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::ChunkDataset").javaText( - "public ChunkDataset(\n" - + " ChunkDataReader chunk_reader,\n" - + " RandomSampler chunk_sampler,\n" - + " RandomSampler example_sampler,\n" - + " ChunkDatasetOptions options) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, null); }\n" - + "public ChunkDataset(\n" - + " ChunkDataReader chunk_reader,\n" - + " RandomSampler chunk_sampler,\n" - + " RandomSampler example_sampler,\n" - + " ChunkDatasetOptions options,\n" - + " Pointer preprocessing_policy) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, preprocessing_policy); }\n" - + "private native void allocate(\n" - + " @ByVal @Cast(\"" + VirtualChunkDataReader + "*\") ChunkDataReader chunk_reader,\n" - + " @ByVal RandomSampler chunk_sampler,\n" - + " @ByVal RandomSampler example_sampler,\n" - + " @ByVal ChunkDatasetOptions options,\n" - + " @ByVal(nullValue = \"std::function>&)>()\") @Cast(\"std::function>&)>*\") Pointer preprocessing_policy);\n")) -// .put(new Info("std::function").cast().pointerTypes("Pointer")) -// .put(new Info("std::function>&)>", "std::function").pointerTypes("PreprocessingPolicy").define()) + "public ChunkDataset(\n" + + " ChunkDataReader chunk_reader,\n" + + " RandomSampler chunk_sampler,\n" + + " RandomSampler example_sampler,\n" + + " ChunkDatasetOptions options) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, null); }\n" + + "public ChunkDataset(\n" + + " ChunkDataReader chunk_reader,\n" + + " RandomSampler chunk_sampler,\n" + + " RandomSampler example_sampler,\n" + + " ChunkDatasetOptions options,\n" + + " Pointer preprocessing_policy) { super((Pointer)null); allocate(chunk_reader, chunk_sampler, example_sampler, options, preprocessing_policy); }\n" + + "private native void allocate(\n" + + " @ByVal @Cast(\"" + VirtualChunkDataReader + "*\") ChunkDataReader chunk_reader,\n" + + " @ByVal RandomSampler chunk_sampler,\n" + + " @ByVal RandomSampler example_sampler,\n" + + " @ByVal ChunkDatasetOptions options,\n" + + " @ByVal(nullValue = \"std::function>&)>()\") @Cast(\"std::function>&)>*\") Pointer preprocessing_policy);\n")) .put(new Info("torch::data::datasets::StatefulDataset," + VirtualChunkDataReader + "::BatchType,size_t>") - .pointerTypes("ChunkStatefulDataset")) + .pointerTypes("ChunkStatefulDataset")) .put(new Info("torch::data::datasets::BatchDataset,c10::optional<" + VirtualChunkDataReader + "::BatchType>,size_t>", - 
"torch::data::datasets::BatchDataset,std::vector > >") - .pointerTypes("ChunkBatchDataset")) + "torch::data::datasets::BatchDataset,std::vector > >") + .pointerTypes("ChunkBatchDataset")) .put(new Info("torch::data::datasets::BatchDataset >,c10::optional<" + VirtualChunkDataReader + "::BatchType>,size_t>", - "torch::data::datasets::BatchDataset >,torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::BatchType,torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::BatchRequestType>") - .pointerTypes("ChunkBatchSharedBatchDataset")) + "torch::data::datasets::BatchDataset >,torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::BatchType,torch::data::datasets::ChunkDataset<" + VirtualChunkDataReader + ",torch::data::samplers::RandomSampler,torch::data::samplers::RandomSampler>::BatchRequestType>") + .pointerTypes("ChunkBatchSharedBatchDataset")) .put(new Info("torch::data::datasets::BatchDataset >,c10::optional<" + VirtualChunkDataReader + "::BatchType>,size_t>::map") - .javaText("public native @ByVal ChunkMapDataset map(@ByVal ExampleStack transform);")) -// .put(new Info("torch::data::datasets::BatchDataset >,c10::optional<" + VirtualChunkDataReader + "::BatchType>,size_t>::map > >") -// .javaNames("map")) + .javaText("public native @ByVal ChunkMapDataset map(@ByVal ExampleStack transform);")) .put(new Info("torch::data::datasets::SharedBatchDataset >") - .pointerTypes("ChunkSharedBatchDataset")) + .pointerTypes("ChunkSharedBatchDataset")) .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >") - .pointerTypes("ChunkMapDataset")) + .pointerTypes("ChunkMapDataset")) .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::reset") - .skip()) + .skip()) .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::DatasetType") - .pointerTypes("ChunkSharedBatchDataset")) + .pointerTypes("ChunkSharedBatchDataset")) .put(new Info("torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,std::vector >,at::ArrayRef >", - "torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,torch::data::datasets::detail::optional_if_t >::is_stateful,torch::data::transforms::Stack >::OutputBatchType>,torch::data::datasets::SharedBatchDataset >::BatchRequestType>") - .pointerTypes("ChunkMapBatchDataset")) -// .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::BatchRequestType").pointerTypes("SizeTArrayRef")) -// .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::OutputBatchType").pointerTypes("Example")) + "torch::data::datasets::BatchDataset >,torch::data::transforms::Stack > >,torch::data::datasets::detail::optional_if_t >::is_stateful,torch::data::transforms::Stack >::OutputBatchType>,torch::data::datasets::SharedBatchDataset >::BatchRequestType>") + .pointerTypes("ChunkMapBatchDataset")) + .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::BatchRequestType").pointerTypes("SizeTArrayRef")) + .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::OutputBatchType").pointerTypes("Example")) .put(new Info("torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::get_batch") - 
.javaText("public native @Name(\"get_batch\") @ByVal ExampleOptional get_batch_example(@Cast(\"size_t\") long indices);")) + .javaText("public native @Name(\"get_batch\") @ByVal ExampleOptional get_batch_example(@Cast(\"size_t\") long indices);")) .put(new Info("torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::Example<>,size_t>", - "torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::BatchType::value_type,torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::BatchRequestType>") - .purify().pointerTypes("ChunkRandomDataLoaderBase")) + "torch::data::DataLoaderBase >,torch::data::transforms::Stack > >,torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::BatchType::value_type,torch::data::datasets::MapDataset >,torch::data::transforms::Stack > >::BatchRequestType>") + .purify().pointerTypes("ChunkRandomDataLoaderBase")) .put(new Info("torch::data::StatefulDataLoader >,torch::data::transforms::Stack > > >") - .pointerTypes("ChunkRandomDataLoader")) + .pointerTypes("ChunkRandomDataLoader")) .put(new Info("torch::data::DataLoaderBase > >,torch::data::Example<>,std::vector >", - "torch::data::DataLoaderBase > >,torch::data::datasets::MapDataset > >::BatchType,torch::data::samplers::RandomSampler::BatchRequestType>") - .purify().pointerTypes("MNISTRandomDataLoaderBase")) + "torch::data::DataLoaderBase > >,torch::data::datasets::MapDataset > >::BatchType,torch::data::samplers::RandomSampler::BatchRequestType>") + .purify().pointerTypes("MNISTRandomDataLoaderBase")) .put(new Info("torch::data::StatelessDataLoader > >,torch::data::samplers::RandomSampler>").pointerTypes("MNISTRandomDataLoader")) .put(new Info("torch::data::datasets::Dataset >", - "torch::data::datasets::Dataset").pointerTypes("MNISTDataset")) + "torch::data::datasets::Dataset").pointerTypes("MNISTDataset")) .put(new Info("torch::data::datasets::BatchDataset >,at::ArrayRef >", - "torch::data::datasets::BatchDataset > >").pointerTypes("MNISTBatchDataset")) + "torch::data::datasets::BatchDataset > >").pointerTypes("MNISTBatchDataset")) .put(new Info("torch::data::datasets::BatchDataset >,at::ArrayRef >::map") - .javaText("public native @ByVal MNISTMapDataset map(@ByVal ExampleStack transform);")) + .javaText("public native @ByVal MNISTMapDataset map(@ByVal ExampleStack transform);")) // .put(new Info("torch::data::datasets::BatchDataset >,at::ArrayRef >::map > >") // .javaNames("map")) .put(new Info("torch::data::datasets::MapDataset > >").pointerTypes("MNISTMapDataset")) .put(new Info("torch::data::datasets::MapDataset > >::reset").skip()) .put(new Info("torch::data::datasets::MapDataset > >::DatasetType").pointerTypes("MNIST")) .put(new Info("torch::data::datasets::BatchDataset > >,std::vector >,at::ArrayRef >", - "torch::data::datasets::BatchDataset > >,torch::data::datasets::detail::optional_if_t >::OutputBatchType>,torch::data::datasets::MNIST::BatchRequestType>") - .pointerTypes("MNISTMapBatchDataset")) + "torch::data::datasets::BatchDataset > >,torch::data::datasets::detail::optional_if_t >::OutputBatchType>,torch::data::datasets::MNIST::BatchRequestType>") + .pointerTypes("MNISTMapBatchDataset")) // .put(new Info("torch::data::datasets::MapDataset > >::BatchRequestType").pointerTypes("SizeTArrayRef")) // .put(new Info("torch::data::datasets::MapDataset > >::OutputBatchType").pointerTypes("Example")) .put(new Info("torch::data::datasets::MapDataset > >::get_batch") - 
.javaText("public native @Name(\"get_batch\") @ByVal Example get_batch_example(@ByVal SizeTArrayRef indices);")) + .javaText("public native @Name(\"get_batch\") @ByVal Example get_batch_example(@ByVal SizeTArrayRef indices);")) .put(new Info("torch::data::datasets::Dataset", - "torch::data::datasets::Dataset").pointerTypes("TensorExampleDataset")) + "torch::data::datasets::Dataset").pointerTypes("TensorExampleDataset")) .put(new Info("torch::data::datasets::BatchDataset >", - "torch::data::datasets::BatchDataset >").pointerTypes("TensorExampleBatchDataset")) + "torch::data::datasets::BatchDataset >").pointerTypes("TensorExampleBatchDataset")) .put(new Info("torch::data::datasets::Dataset::get_batch", - "torch::data::datasets::BatchDataset >::get_batch") - .javaText("public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request);")) - - .put(new Info("torch::nn::detail::ConvNdOptions<1>").pointerTypes("DetailConv1dOptions")) - .put(new Info("torch::nn::detail::ConvNdOptions<2>").pointerTypes("DetailConv2dOptions")) - .put(new Info("torch::nn::detail::ConvNdOptions<3>").pointerTypes("DetailConv3dOptions")) - .put(new Info("torch::nn::ConvOptions<1>").pointerTypes("Conv1dOptions")) - .put(new Info("torch::nn::ConvOptions<2>").pointerTypes("Conv2dOptions")) - .put(new Info("torch::nn::ConvOptions<3>").pointerTypes("Conv3dOptions")) - .put(new Info("torch::nn::functional::ConvFuncOptions<1>").pointerTypes("Conv1dFuncOptions")) - .put(new Info("torch::nn::functional::ConvFuncOptions<2>").pointerTypes("Conv2dFuncOptions")) - .put(new Info("torch::nn::functional::ConvFuncOptions<3>").pointerTypes("Conv3dFuncOptions")) - .put(new Info("torch::nn::ConvTransposeOptions<1>").pointerTypes("ConvTranspose1dOptions")) - .put(new Info("torch::nn::ConvTransposeOptions<2>").pointerTypes("ConvTranspose2dOptions")) - .put(new Info("torch::nn::ConvTransposeOptions<3>").pointerTypes("ConvTranspose3dOptions")) - .put(new Info("torch::nn::functional::ConvTransposeFuncOptions<1>").pointerTypes("ConvTranspose1dFuncOptions")) - .put(new Info("torch::nn::functional::ConvTransposeFuncOptions<2>").pointerTypes("ConvTranspose2dFuncOptions")) - .put(new Info("torch::nn::functional::ConvTransposeFuncOptions<3>").pointerTypes("ConvTranspose3dFuncOptions")) - - .put(new Info("torch::nn::ReflectionPadOptions<1>").pointerTypes("ReflectionPad1dOptions")) - .put(new Info("torch::nn::ReflectionPadOptions<2>").pointerTypes("ReflectionPad2dOptions")) - .put(new Info("torch::nn::ReflectionPadOptions<3>").pointerTypes("ReflectionPad3dOptions")) - .put(new Info("torch::nn::ReplicationPadOptions<1>").pointerTypes("ReplicationPad1dOptions")) - .put(new Info("torch::nn::ReplicationPadOptions<2>").pointerTypes("ReplicationPad2dOptions")) - .put(new Info("torch::nn::ReplicationPadOptions<3>").pointerTypes("ReplicationPad3dOptions")) - .put(new Info("torch::nn::ConstantPadOptions<1>").pointerTypes("ConstantPad1dOptions")) - .put(new Info("torch::nn::ConstantPadOptions<2>").pointerTypes("ConstantPad2dOptions")) - .put(new Info("torch::nn::ConstantPadOptions<3>").pointerTypes("ConstantPad3dOptions")) - - .put(new Info("torch::nn::AvgPoolOptions<1>", "torch::nn::functional::AvgPool1dFuncOptions").pointerTypes("AvgPool1dOptions")) - .put(new Info("torch::nn::AvgPoolOptions<2>", "torch::nn::functional::AvgPool2dFuncOptions").pointerTypes("AvgPool2dOptions")) - .put(new Info("torch::nn::AvgPoolOptions<3>", "torch::nn::functional::AvgPool3dFuncOptions").pointerTypes("AvgPool3dOptions")) - .put(new 
Info("torch::nn::MaxPoolOptions<1>", "torch::nn::functional::MaxPool1dFuncOptions").pointerTypes("MaxPool1dOptions")) - .put(new Info("torch::nn::MaxPoolOptions<2>", "torch::nn::functional::MaxPool2dFuncOptions").pointerTypes("MaxPool2dOptions")) - .put(new Info("torch::nn::MaxPoolOptions<3>", "torch::nn::functional::MaxPool3dFuncOptions").pointerTypes("MaxPool3dOptions")) - .put(new Info("torch::nn::AdaptiveAvgPoolOptions >", "torch::nn::functional::AdaptiveAvgPool1dFuncOptions").pointerTypes("AdaptiveAvgPool1dOptions")) - .put(new Info("torch::nn::AdaptiveAvgPoolOptions >", "torch::nn::functional::AdaptiveAvgPool2dFuncOptions").pointerTypes("AdaptiveAvgPool2dOptions")) - .put(new Info("torch::nn::AdaptiveAvgPoolOptions >", "torch::nn::functional::AdaptiveAvgPool3dFuncOptions").pointerTypes("AdaptiveAvgPool3dOptions")) - .put(new Info("torch::nn::AdaptiveMaxPoolOptions >", "torch::nn::functional::AdaptiveMaxPool1dFuncOptions").pointerTypes("AdaptiveMaxPool1dOptions")) - .put(new Info("torch::nn::AdaptiveMaxPoolOptions >", "torch::nn::functional::AdaptiveMaxPool2dFuncOptions").pointerTypes("AdaptiveMaxPool2dOptions")) - .put(new Info("torch::nn::AdaptiveMaxPoolOptions >", "torch::nn::functional::AdaptiveMaxPool3dFuncOptions").pointerTypes("AdaptiveMaxPool3dOptions")) - .put(new Info("torch::nn::MaxUnpoolOptions<1>").pointerTypes("MaxUnpool1dOptions")) - .put(new Info("torch::nn::MaxUnpoolOptions<2>").pointerTypes("MaxUnpool2dOptions")) - .put(new Info("torch::nn::MaxUnpoolOptions<3>").pointerTypes("MaxUnpool3dOptions")) - .put(new Info("torch::nn::functional::MaxUnpoolFuncOptions<1>").pointerTypes("MaxUnpool1dFuncOptions")) - .put(new Info("torch::nn::functional::MaxUnpoolFuncOptions<2>").pointerTypes("MaxUnpool2dFuncOptions")) - .put(new Info("torch::nn::functional::MaxUnpoolFuncOptions<3>").pointerTypes("MaxUnpool3dFuncOptions")) - .put(new Info("torch::nn::FractionalMaxPoolOptions<1>", "torch::nn::functional::FractionalMaxPool1dFuncOptions").pointerTypes("FractionalMaxPool1dOptions")) - .put(new Info("torch::nn::FractionalMaxPoolOptions<2>", "torch::nn::functional::FractionalMaxPool2dFuncOptions").pointerTypes("FractionalMaxPool2dOptions")) - .put(new Info("torch::nn::FractionalMaxPoolOptions<3>", "torch::nn::functional::FractionalMaxPool3dFuncOptions").pointerTypes("FractionalMaxPool3dOptions")) - .put(new Info("torch::nn::LPPoolOptions<1>", "torch::nn::functional::LPPool1dFuncOptions").pointerTypes("LPPool1dOptions")) - .put(new Info("torch::nn::LPPoolOptions<2>", "torch::nn::functional::LPPool2dFuncOptions").pointerTypes("LPPool2dOptions")) - .put(new Info("torch::nn::LPPoolOptions<3>", "torch::nn::functional::LPPool3dFuncOptions").pointerTypes("LPPool3dOptions")) - - .put(new Info("std::shared_ptr").annotations("@SharedPtr") - .valueTypes("@Cast({\"\", \"std::shared_ptr\"}) Module").pointerTypes("Module")) - .put(new Info("torch::nn::ModuleHolder").pointerTypes("ModuleHolder")) - .put(new Info("torch::nn::Module::as").javaText("public Module asModule() { return this; }")) - .put(new Info("torch::nn::Module::register_module").javaNames("register_module")) - .put(new Info("std::shared_ptr").annotations("@SharedPtr") - .valueTypes("@Cast({\"\", \"std::shared_ptr\"}) AnyModule").pointerTypes("AnyModule")); + "torch::data::datasets::BatchDataset >::get_batch") + .javaText("public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request);")) + ; + + //// Tensor factories String[] factories = {"_cudnn_init_dropout_state", "arange", "bartlett_window", "blackman_window", 
"empty", "_empty_affine_quantized", - "_empty_per_channel_affine_quantized", "empty_quantized", "empty_like", "empty_strided", "eye", "full", "full", "full_like", "from_file", - "hann_window", "hamming_window", "kaiser_window", "linspace", "logspace", "ones", "ones_like", "scalar_tensor", "rand", "rand_like", - "randint", "randint_like", "randn", "randn_like", "randperm", "randperm", "range", "zeros", "_efficientzerotensor", "zeros_like", - "sparse_compressed_tensor", "sparse_csr_tensor", "sparse_csc_tensor", "sparse_bsr_tensor", "sparse_bsc_tensor", - "_sparse_compressed_tensor_unsafe", "_sparse_csr_tensor_unsafe", "_sparse_csc_tensor_unsafe", "_sparse_bsr_tensor_unsafe", "_sparse_bsc_tensor_unsafe", - "sparse_coo_tensor", "_sparse_coo_tensor_unsafe", "_sparse_coo_tensor_with_dims", "_sparse_coo_tensor_with_dims_and_tensors", - "_to_copy", "tril_indices", "triu_indices", "normal", "fft_fftfreq", "fft_rfftfreq"}; + "_empty_per_channel_affine_quantized", "empty_quantized", "empty_like", "empty_strided", "eye", "full", "full_like", "from_file", + "hann_window", "hamming_window", "kaiser_window", "linspace", "logspace", "ones", "ones_like", "scalar_tensor", "rand", "rand_like", + "randint", "randint_like", "randn", "randn_like", "randperm", "range", "zeros", "_efficientzerotensor", "zeros_like", + "sparse_compressed_tensor", "sparse_csr_tensor", "sparse_csc_tensor", "sparse_bsr_tensor", "sparse_bsc_tensor", + "_sparse_compressed_tensor_unsafe", "_sparse_csr_tensor_unsafe", "_sparse_csc_tensor_unsafe", "_sparse_bsr_tensor_unsafe", "_sparse_bsc_tensor_unsafe", + "sparse_coo_tensor", "_sparse_coo_tensor_unsafe", "_sparse_coo_tensor_with_dims", "_sparse_coo_tensor_with_dims_and_tensors", + "_to_copy", "tril_indices", "triu_indices", "normal", "fft_fftfreq", "fft_rfftfreq"}; for (String factory : factories) { infoMap.put(new Info("torch::" + factory).javaNames("torch_" + factory).skipDefaults(factory.equals("range"))) .put(new Info("torch::autograd::" + factory)) @@ -2786,23 +1343,93 @@ public void map(InfoMap infoMap) { .put(new Info("torch::nn::" + factory)); } - mapModule(infoMap, "ModuleDict", true); - mapModule(infoMap, "ModuleList", true); - mapModule(infoMap, "Sequential", true); - mapModule(infoMap, "ParameterDict", true); - mapModule(infoMap, "ParameterList", true); - mapModule(infoMap, "AdaptiveLogSoftmaxWithLoss", false); + //// Module options + infoMap + .put(new Info("torch::nn::detail::ConvNdOptions<1>").pointerTypes("DetailConv1dOptions")) + .put(new Info("torch::nn::detail::ConvNdOptions<2>").pointerTypes("DetailConv2dOptions")) + .put(new Info("torch::nn::detail::ConvNdOptions<3>").pointerTypes("DetailConv3dOptions")) + .put(new Info("torch::nn::ConvOptions<1>").pointerTypes("Conv1dOptions")) + .put(new Info("torch::nn::ConvOptions<2>").pointerTypes("Conv2dOptions")) + .put(new Info("torch::nn::ConvOptions<3>").pointerTypes("Conv3dOptions")) + .put(new Info("torch::nn::functional::ConvFuncOptions<1>").pointerTypes("Conv1dFuncOptions")) + .put(new Info("torch::nn::functional::ConvFuncOptions<2>").pointerTypes("Conv2dFuncOptions")) + .put(new Info("torch::nn::functional::ConvFuncOptions<3>").pointerTypes("Conv3dFuncOptions")) + .put(new Info("torch::nn::ConvTransposeOptions<1>").pointerTypes("ConvTranspose1dOptions")) + .put(new Info("torch::nn::ConvTransposeOptions<2>").pointerTypes("ConvTranspose2dOptions")) + .put(new Info("torch::nn::ConvTransposeOptions<3>").pointerTypes("ConvTranspose3dOptions")) + .put(new 
Info("torch::nn::functional::ConvTransposeFuncOptions<1>").pointerTypes("ConvTranspose1dFuncOptions")) + .put(new Info("torch::nn::functional::ConvTransposeFuncOptions<2>").pointerTypes("ConvTranspose2dFuncOptions")) + .put(new Info("torch::nn::functional::ConvTransposeFuncOptions<3>").pointerTypes("ConvTranspose3dFuncOptions")) + + .put(new Info("torch::nn::ReflectionPadOptions<1>").pointerTypes("ReflectionPad1dOptions")) + .put(new Info("torch::nn::ReflectionPadOptions<2>").pointerTypes("ReflectionPad2dOptions")) + .put(new Info("torch::nn::ReflectionPadOptions<3>").pointerTypes("ReflectionPad3dOptions")) + .put(new Info("torch::nn::ReplicationPadOptions<1>").pointerTypes("ReplicationPad1dOptions")) + .put(new Info("torch::nn::ReplicationPadOptions<2>").pointerTypes("ReplicationPad2dOptions")) + .put(new Info("torch::nn::ReplicationPadOptions<3>").pointerTypes("ReplicationPad3dOptions")) + .put(new Info("torch::nn::ConstantPadOptions<1>").pointerTypes("ConstantPad1dOptions")) + .put(new Info("torch::nn::ConstantPadOptions<2>").pointerTypes("ConstantPad2dOptions")) + .put(new Info("torch::nn::ConstantPadOptions<3>").pointerTypes("ConstantPad3dOptions")) + .put(new Info("torch::nn::AvgPoolOptions<1>", "torch::nn::functional::AvgPool1dFuncOptions").pointerTypes("AvgPool1dOptions")) + .put(new Info("torch::nn::AvgPoolOptions<2>", "torch::nn::functional::AvgPool2dFuncOptions").pointerTypes("AvgPool2dOptions")) + .put(new Info("torch::nn::AvgPoolOptions<3>", "torch::nn::functional::AvgPool3dFuncOptions").pointerTypes("AvgPool3dOptions")) + .put(new Info("torch::nn::MaxPoolOptions<1>", "torch::nn::functional::MaxPool1dFuncOptions").pointerTypes("MaxPool1dOptions")) + .put(new Info("torch::nn::MaxPoolOptions<2>", "torch::nn::functional::MaxPool2dFuncOptions").pointerTypes("MaxPool2dOptions")) + .put(new Info("torch::nn::MaxPoolOptions<3>", "torch::nn::functional::MaxPool3dFuncOptions").pointerTypes("MaxPool3dOptions")) + .put(new Info("torch::nn::AdaptiveAvgPoolOptions >", "torch::nn::functional::AdaptiveAvgPool1dFuncOptions").pointerTypes("AdaptiveAvgPool1dOptions")) + .put(new Info("torch::nn::AdaptiveAvgPoolOptions >", "torch::nn::functional::AdaptiveAvgPool2dFuncOptions").pointerTypes("AdaptiveAvgPool2dOptions")) + .put(new Info("torch::nn::AdaptiveAvgPoolOptions >", "torch::nn::functional::AdaptiveAvgPool3dFuncOptions").pointerTypes("AdaptiveAvgPool3dOptions")) + .put(new Info("torch::nn::AdaptiveMaxPoolOptions >", "torch::nn::functional::AdaptiveMaxPool1dFuncOptions").pointerTypes("AdaptiveMaxPool1dOptions")) + .put(new Info("torch::nn::AdaptiveMaxPoolOptions >", "torch::nn::functional::AdaptiveMaxPool2dFuncOptions").pointerTypes("AdaptiveMaxPool2dOptions")) + .put(new Info("torch::nn::AdaptiveMaxPoolOptions >", "torch::nn::functional::AdaptiveMaxPool3dFuncOptions").pointerTypes("AdaptiveMaxPool3dOptions")) + .put(new Info("torch::nn::MaxUnpoolOptions<1>").pointerTypes("MaxUnpool1dOptions")) + .put(new Info("torch::nn::MaxUnpoolOptions<2>").pointerTypes("MaxUnpool2dOptions")) + .put(new Info("torch::nn::MaxUnpoolOptions<3>").pointerTypes("MaxUnpool3dOptions")) + .put(new Info("torch::nn::functional::MaxUnpoolFuncOptions<1>").pointerTypes("MaxUnpool1dFuncOptions")) + .put(new Info("torch::nn::functional::MaxUnpoolFuncOptions<2>").pointerTypes("MaxUnpool2dFuncOptions")) + .put(new Info("torch::nn::functional::MaxUnpoolFuncOptions<3>").pointerTypes("MaxUnpool3dFuncOptions")) + .put(new Info("torch::nn::FractionalMaxPoolOptions<1>", 
"torch::nn::functional::FractionalMaxPool1dFuncOptions").pointerTypes("FractionalMaxPool1dOptions")) + .put(new Info("torch::nn::FractionalMaxPoolOptions<2>", "torch::nn::functional::FractionalMaxPool2dFuncOptions").pointerTypes("FractionalMaxPool2dOptions")) + .put(new Info("torch::nn::FractionalMaxPoolOptions<3>", "torch::nn::functional::FractionalMaxPool3dFuncOptions").pointerTypes("FractionalMaxPool3dOptions")) + .put(new Info("torch::nn::LPPoolOptions<1>", "torch::nn::functional::LPPool1dFuncOptions").pointerTypes("LPPool1dOptions")) + .put(new Info("torch::nn::LPPoolOptions<2>", "torch::nn::functional::LPPool2dFuncOptions").pointerTypes("LPPool2dOptions")) + .put(new Info("torch::nn::LPPoolOptions<3>", "torch::nn::functional::LPPool3dFuncOptions").pointerTypes("LPPool3dOptions")) + ; + + //// Modules + infoMap + .put(new Info("torch::nn::Module::register_module").javaNames("register_module")) + .put(new Info("torch::nn::Module").upcast()) + ; + String[] virtuals = {"train", "is_training", "to", "zero_grad", "save", "load", "pretty_print", "is_serializable"}; + for (String m : virtuals) + infoMap.put(new Info("torch::nn::Module::" + m).virtualize().annotations("@Virtual(subclasses=false, method=\"" + m + "\")")); + + // clone returns a std::shared_ptr and not a Module. + // This cast is normally added automatically by Parser but the info on shared_ptr prevents this (issue #670) + // The second value of @Cast is used for the return type + infoMap.put(new Info("torch::nn::Module::clone") + .virtualize() + .annotations("@Virtual(subclasses=false, method=\"clone\")", "@Cast({\"\", \"std::shared_ptr\"})")); + + mapModule(infoMap, "ModuleDict", false); + mapModule(infoMap, "ModuleList", false); + mapModule(infoMap, "Sequential", false); + mapModule(infoMap, "ParameterDict", false); + mapModule(infoMap, "ParameterList", false); + + mapModule(infoMap, "AdaptiveLogSoftmaxWithLoss"); for (int i = 1; i <= 3; i++) { mapModule(infoMap, "BatchNorm" + i + "d", "torch::nn::BatchNormImplBase<" + i + ",torch::nn::BatchNorm" + i + "dImpl>", - "torch::nn::NormImplBase<" + i + ",torch::nn::BatchNorm" + i + "dImpl,torch::nn::BatchNormOptions>"); + "torch::nn::NormImplBase<" + i + ",torch::nn::BatchNorm" + i + "dImpl,torch::nn::BatchNormOptions>"); mapModule(infoMap, "InstanceNorm" + i + "d", "torch::nn::InstanceNormImpl<" + i + ",torch::nn::InstanceNorm" + i + "dImpl>", - "torch::nn::NormImplBase<" + i + ",torch::nn::InstanceNorm" + i + "dImpl,torch::nn::InstanceNormOptions>"); + "torch::nn::NormImplBase<" + i + ",torch::nn::InstanceNorm" + i + "dImpl,torch::nn::InstanceNormOptions>"); mapModule(infoMap, "Conv" + i + "d", "torch::nn::ConvNdImpl<" + i + ",torch::nn::Conv" + i + "dImpl>"); mapModule(infoMap, "ConvTranspose" + i + "d", "torch::nn::ConvTransposeNdImpl<" + i + ",torch::nn::ConvTranspose" + i + "dImpl>", - "torch::nn::ConvNdImpl<" + i + ",torch::nn::ConvTranspose" + i + "dImpl>"); + "torch::nn::ConvNdImpl<" + i + ",torch::nn::ConvTranspose" + i + "dImpl>"); mapModule(infoMap, "Dropout" + (i > 1 ? i + "d" : ""), "torch::nn::detail::_DropoutNd 1 ? 
i + "d" : "") + "Impl>"); } @@ -2917,323 +1544,898 @@ public void map(InfoMap infoMap) { mapModule(infoMap, "Transformer"); infoMap.put(new Info("torch::optim::OptimizerCloneableOptions", - "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableAdagradOptions")) + "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableAdagradOptions")) .put(new Info("torch::optim::OptimizerCloneableParamState", - "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableAdagradParamState")) + "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableAdagradParamState")) .put(new Info("torch::optim::OptimizerCloneableOptions", - "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableAdamOptions")) + "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableAdamOptions")) .put(new Info("torch::optim::OptimizerCloneableParamState", - "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableAdamParamState")) + "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableAdamParamState")) .put(new Info("torch::optim::OptimizerCloneableOptions", - "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableAdamWOptions")) + "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableAdamWOptions")) .put(new Info("torch::optim::OptimizerCloneableParamState", - "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableAdamWParamState")) + "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableAdamWParamState")) .put(new Info("torch::optim::OptimizerCloneableOptions", - "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableLBFGSOptions")) + "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableLBFGSOptions")) .put(new Info("torch::optim::OptimizerCloneableParamState", - "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableLBFGSParamState")) + "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableLBFGSParamState")) .put(new Info("torch::optim::OptimizerCloneableOptions", - "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableRMSpropOptions")) + "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableRMSpropOptions")) .put(new Info("torch::optim::OptimizerCloneableParamState", - "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableRMSpropParamState")) + "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableRMSpropParamState")) .put(new Info("torch::optim::OptimizerCloneableOptions", - "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableSGDOptions")) + "torch::optim::OptimizerCloneableOptions").pointerTypes("OptimizerCloneableSGDOptions")) .put(new Info("torch::optim::OptimizerCloneableParamState", - "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableSGDParamState")) - - .put(new Info("c10::intrusive_ptr_target", "c10::nullopt", "c10::nullopt_t", "c10::string_view", "c10::GeneratorImpl", "c10::impl::DeviceGuardImplInterface", "c10::impl::PyObjectSlot", - "PyObject", "std::function", "THPObjectPtr", "pyobj_list", "std::chrono::milliseconds", "std::exception_ptr", "std::type_info", - "std::pair", "std::stack >", "torch::autograd::utils::DelayWarningHandler", - "std::is_same,torch::detail::pack >", "at::cuda::NVRTC", "at::RecordFunctionCallback", "at::StepCallbacks", 
"THCState", "THHState", - "torch::autograd::ViewInfo", "torch::jit::InlinedCallStackPtr", "InlinedCallStackPtr", "torch::jit::ScopePtr", "torch::jit::BackendDebugInfoRecorder", - "torch::detail::TensorDataContainer", "std::shared_ptr", "caffe2::serialize::PyTorchStreamWriter", - "c10::impl::GenericList", "c10::impl::PyInterpreter", "std::function", "c10::detail::DictImpl::dict_map_type::iterator", - "std::iterator >", - "c10::TensorImpl::identity", "c10::TensorImpl::identity", - - "c10::optional", "c10::optional", "c10::optional >", "c10::optional", - "c10::intrusive_ptr", "c10::ArrayRef >", "c10::intrusive_ptr", - "c10::intrusive_ptr", "c10::intrusive_ptr", "c10::intrusive_ptr", - "c10::SymNode", "SymNode", "c10::SymIntNode", "c10::SymFloatNode", "torch::jit::DetachedBuffer::UniqueDetachedBuffer", - "c10::intrusive_ptr", "at::SmallVector", "std::unordered_map", - "torch::jit::Maybe >", "torch::jit::Maybe >", "c10::optional", - "c10::optional::ListOfOptionalElements>", "c10::optional::ListOfOptionalElements>", - "c10::optional", "c10::optional >", - "c10::optional", - "c10::optional", - "std::tuple >,c10::optional >,c10::optional >", - "c10::optional >", "c10::optional >", - "std::vector >", "std::reference_wrapper", - - "std::enable_shared_from_this", - "std::enable_shared_from_this", - "std::enable_shared_from_this", - "std::enable_shared_from_this", - "std::enable_shared_from_this", - "std::enable_shared_from_this", - "std::enable_shared_from_this", - "std::enable_shared_from_this", "std::enable_shared_from_this", - "std::enable_shared_from_this", "std::enable_shared_from_this", - "std::enable_shared_from_this", "std::enable_shared_from_this").cast().pointerTypes("Pointer")) - - .put(new Info("at::Tensor::toString", "at::TensorBase::toString", "at::DeprecatedTypeProperties::toString", "torch::jit::Graph::toString").javaText("public native @StdString String toString();")) - .put(new Info("torch::jit::tracer::pauseTracing()").javaText("@Namespace(\"torch::jit::tracer\") public static native @ByVal @Cast(\"std::function*\") Pointer pauseTracing();")) - .put(new Info("torch::jit::ProfileOp::getCallback()", "torch::jit::ProfileIValueOp::getCallback()").javaText( - "public native @ByVal @Cast(\"std::function&)>*\") Pointer getCallback();")) -// .put(new Info("at::indexing::slicePrefix1sSize").javaText( -// "@Namespace(\"at::indexing\") public static native @ByVal @Cast(\"c10::ArrayRef*\") LongArrayRef slicePrefix1sSize(@ByRef @Cast(\"c10::ArrayRef*\") LongArrayRef sizes);")) - .put(new Info("torch::optim::AdamOptions::betas", "torch::optim::AdamWOptions::betas").javaText( - "public native @Cast(\"std::tuple*\") @ByRef @NoException DoublePointer betas();")) - .put(new Info("torch::optim::Adagrad::step", "torch::optim::Adam::step", "torch::optim::AdamW::step", - "torch::optim::LBFG::step", "torch::optim::RMSprop::step", "torch::optim::SGD::step").javaText( - "public native @ByVal Tensor step(@ByVal(nullValue = \"torch::optim::Optimizer::LossClosure(nullptr)\") LossClosure closure);\n" - + "public native @ByVal Tensor step();\n")) - - .put(new Info("c10::DeleterFnPtr").cast().valueTypes("Deleter", "Pointer", "long")) - .put(new Info("std::function").pointerTypes("Deleter", "@Cast(\"void(*)(void*)\") Pointer", "@Cast(\"void(*)(void*)\") long")) - .put(new Info("std::function").pointerTypes("Func")) - .put(new Info("std::function").pointerTypes("Fetcher")) - .put(new Info("std::function").pointerTypes("Logger")) - .put(new Info("std::function", - "std::function").pointerTypes("DataLogger")) - 
.put(new Info("std::function", - "std::function").pointerTypes("TypeMapper")) - .put(new Info("std::function", - "std::function").pointerTypes("ValueMapper")) - .put(new Info("std::function", - "std::function").pointerTypes("GraphFunctionCreator")) - .put(new Info("std::function", - "std::function").pointerTypes("ModuleFunction")) - .put(new Info("std::function&)>", - "std::function&)>").pointerTypes("IValueCallback")) - .put(new Info("std::function").pointerTypes("CustomFormatter")) - .put(new Info("std::function").pointerTypes("IValueVisitor")) - .put(new Info("std::function").pointerTypes("Reader")) - .put(new Info("std::function").pointerTypes("RecordReader")) - .put(new Info("std::function", - "std::function").pointerTypes("Writer")) - .put(new Info("std::function").pointerTypes("TensorIdGetter")) - .put(new Info("std::function&)>").pointerTypes("TypeRenamer")) - .put(new Info("std::function").pointerTypes("ReadFunction")) - .put(new Info("std::function").pointerTypes("WriteFunction")) - .put(new Info("std::function").pointerTypes("SizeFunction")) - .put(new Info("std::function").pointerTypes("LossClosure")) - .put(new Info("std::function", - "torch::nn::TripletMarginWithDistanceLossOptions::distance_function_t", - "torch::nn::functional::TripletMarginWithDistanceLossFuncOptions::distance_function_t").pointerTypes("DistanceFunction")) - .put(new Info("c10::TypePtr (*)(const std::string&)", "torch::jit::Unpickler::TypeParserT").pointerTypes("TypeParser").define(false)) + "torch::optim::OptimizerCloneableParamState").pointerTypes("OptimizerCloneableSGDParamState")) ; - } - public static class Deleter extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Deleter(Pointer p) { super(p); } - protected Deleter() { allocate(); } - private native void allocate(); - public native void call(Pointer p); - } + //// AnyModule, AnyValue and Sequential + infoMap + // All forward variants of native modules + .put(new Info("torch::nn::AnyModule::any_forward").javaText( + "public native @ByVal AnyValue any_forward(@Const @ByRef AnyValue input);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6, @Const @ByRef Tensor input7, @Const @ByRef Tensor input8);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @ByRef(nullValue = \"c10::optional(c10::nullopt)\") @Cast({\"int64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long... 
output_size);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @Const @ByRef(nullValue = \"c10::optional(c10::nullopt)\") LongArrayRefOptional output_size);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Const @ByRef(nullValue = \"c10::optional >(c10::nullopt)\") LongVectorOptional output_size);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @ByVal(nullValue = \"torch::optional >{}\") T_TensorTensor_TOptional hx_opt);\n" + + "public native @ByVal AnyValue any_forward(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = \"torch::Tensor{}\") Tensor key_padding_mask, @Cast(\"bool\") boolean need_weights/*=true*/, @Const @ByRef(nullValue = \"torch::Tensor{}\") Tensor attn_mask, @Cast(\"bool\") boolean average_attn_weights/*=true*/);\n" + )) + .put(new Info("torch::nn::AnyModule::forward", "torch::nn::SequentialImpl::forward").javaText( + "public native @ByVal Tensor forward(@Const @ByRef Tensor input);\n" + + "public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2);\n" + + "public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3);\n" + + "public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4);\n" + + "public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6);\n" + + "public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6, @Const @ByRef Tensor input7, @Const @ByRef Tensor input8);\n" + + "public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByRef(nullValue = \"c10::optional(c10::nullopt)\") @Cast({\"int64_t*\", \"c10::ArrayRef\", \"std::vector&\"}) @StdVector long... 
output_size);\n" + + "public native @ByVal Tensor forward(@Const @ByRef Tensor input, @Const @ByRef(nullValue = \"c10::optional(c10::nullopt)\") LongArrayRefOptional output_size);\n" + + "public native @ByVal Tensor forward(@Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Const @ByRef(nullValue = \"c10::optional >(c10::nullopt)\") LongVectorOptional output_size);\n" + + "public native @ByVal @Name(\"forward>>\") T_TensorT_TensorTensor_T_T forwardT_TensorT_TensorTensor_T_T(@Const @ByRef Tensor input);\n" + + "public native @ByVal @Name(\"forward>>\") T_TensorT_TensorTensor_T_T forwardT_TensorT_TensorTensor_T_T(@Const @ByRef Tensor input, @ByVal(nullValue = \"torch::optional >{}\") T_TensorTensor_TOptional hx_opt);\n" + + "public native @ByVal @Name(\"forward>\") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input);\n" + + "public native @ByVal @Name(\"forward>\") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2);\n" + + "public native @ByVal @Name(\"forward>\") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3);\n" + + "public native @ByVal @Name(\"forward>\") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input, @ByVal(nullValue = \"torch::optional >{}\") T_TensorTensor_TOptional hx_opt);\n" + + "public native @ByVal @Name(\"forward>\") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = \"torch::Tensor{}\") Tensor key_padding_mask, @Cast(\"bool\") boolean need_weights/*=true*/, @Const @ByRef(nullValue = \"torch::Tensor{}\") Tensor attn_mask, @Cast(\"bool\") boolean average_attn_weights/*=true*/);\n" + + "public native @ByVal @Name(\"forward\") ASMoutput forwardASMoutput(@Const @ByRef Tensor input, @Const @ByRef Tensor target);\n" + )) + .put(new Info("torch::nn::AnyModule(ModuleType*)") + // We cannot use template instantiation mechanism in Parser with something like + // new Info("torch::nn::AnyModule(ModuleType*)") + // because it doesn't work with javaText. And we need javaText because of @Cast. + .javaText(anyModuleConstructors)); + + for (String[] outputType : new String[][]{ + {"at::Tensor", "Tensor"}, + {"torch::nn::ASMoutput", "ASMoutput"}, + {"std::tuple", "T_TensorTensor_T"}, + {"std::tuple >", "T_TensorT_TensorTensor_T_T"} + }) { + infoMap + .put(new Info(template("torch::nn::AnyValue::get", outputType[0])).javaNames("get" + outputType[1])) + .put(new Info(template("torch::nn::AnyValue::try_get", outputType[0])).javaNames("try_get" + outputType[1])) + ; + } - public static class Func extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Func(Pointer p) { super(p); } - protected Func() { allocate(); } - private native void allocate(); - public native void call(); - } - public static class Fetcher extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Fetcher(Pointer p) { super(p); } - protected Fetcher() { allocate(); } - private native void allocate(); - public native @StdString String call(); - } + //// Classes handled with @SharedPtr + for (PointerInfo pi : new PointerInfo[]{ + new PointerInfo("torch::jit::Graph"), + new PointerInfo("torch::jit::Operator"), + new PointerInfo("torch::jit::Resolver"), + new PointerInfo("at::Tensor"), + new PointerInfo("torch::jit::tensorexpr::analysis::AccessInfo"), + new PointerInfo("c10::ClassType"), + new PointerInfo("c10::TensorType").otherCppNames("c10::TensorTypePtr", "at::TensorTypePtr", "torch::TensorTypePtr"), + new PointerInfo("torch::autograd::FunctionPreHook"), + new PointerInfo("torch::nn::AnyModule"), + new PointerInfo("torch::nn::Module"), + new PointerInfo("const at::functorch::FuncTorchTLSBase"), + new PointerInfo("const torch::jit::CompilationUnit"), + new PointerInfo("torch::jit::SugaredValue") + }) { + // See issue #670 + String[] cppNames = new String[pi.argumentNames.length + pi.otherCppNames.length]; + int i = 0; + for (String n : pi.argumentNames) cppNames[i++] = template("std::shared_ptr", n); + for (String n : pi.otherCppNames) cppNames[i++] = n; + // Specifying the parameter of the annotation allows to disambiguate cases where a class can store either a + // std::shared_ptr or std::shared_ptr (like CompilationUnit) + // .valueTypes("@Cast(\"const torch::jit::CompilationUnit*\") CompilationUnit") seems to work too but for obscure reason + infoMap.put(new Info(cppNames).annotations("@SharedPtr(\"" + pi.argumentNames[0] + "\")").pointerTypes(pi.javaBaseName)); + + // Also annotate constructor of target class to ensure only one shared_ptr exists for each instance + String n = pi.argumentNames[0].substring(pi.argumentNames[0].lastIndexOf(' ') + 1); // Remove possible const + infoMap.put(new Info(n + n.substring(n.lastIndexOf("::"))).annotations("@SharedPtr")); + } - public static class Logger extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Logger(Pointer p) { super(p); } - protected Logger() { allocate(); } - private native void allocate(); - public native void call(@Cast({"", "const std::string&"}) @StdString String s); - } - public static class DataLogger extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DataLogger(Pointer p) { super(p); } - protected DataLogger() { allocate(); } - private native void allocate(); - public native void call(@ByRef @Cast("const c10::DDPLoggingData*") Pointer d); - } + //// @UniquePtr + infoMap + .put(new Info("std::unique_ptr").annotations("@UniquePtr") + .valueTypes("@Cast({\"\", \"std::unique_ptr&&\"}) FunctionPreHook") + .pointerTypes("FunctionPreHook")) + .put(new Info("std::unique_ptr").annotations("@UniquePtr") + .valueTypes("@Cast({\"\", \"std::unique_ptr&&\"}) FunctionPostHook") + .pointerTypes("FunctionPostHook")) + .put(new Info("std::unique_ptr", "Ptr").annotations("@UniquePtr").pointerTypes("AttributeValue")) - public static class TypeMapper extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
-     */
-    public TypeMapper(Pointer p) { super(p); }
-    protected TypeMapper() { allocate(); }
-    private native void allocate();
-    public native @ByVal @Cast("c10::TypePtr*") Pointer call(@ByVal @Cast("c10::TypePtr*") Pointer t);
-  }
+        ;
-  public static class ValueMapper extends FunctionPointer {
-      static { Loader.load(); }
-      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-      public ValueMapper(Pointer p) { super(p); }
-      protected ValueMapper() { allocate(); }
-      private native void allocate();
-      public native @Cast("torch::jit::Value*") Pointer call(@Cast("torch::jit::Value*") Pointer v);
-  }
+
+        /* TODO: see how to map these, if needed and meant to be part of the API */
+        infoMap.put(new Info("c10::MaybeOwnedTraitsGenericImpl >::assignBorrow",
+                "c10::MaybeOwnedTraitsGenericImpl >::destroyBorrow",
+                "torch::autograd::profiler::ProfilerResult", "torch::profiler::impl::ProfilerEventStub",
+                "torch::autograd::profiler::enableProfiler", "torch::autograd::profiler::enableProfilerWithEventPostProcess",
+                "torch::profiler::impl::ProfilerStateBase", "torch::profiler::impl::ProfilerStubs", "torch::autograd::profiler::KinetoEvent",
+                "at::Tensor::wrap_tensor_impl(c10::TensorImpl*)",
+                "c10::impl::list_element_to_const_ref",
+                "c10::unpackSymInt(at::OptionalSymIntArrayRef)",
+                "c10::detail::infer_schema::make_function_schema(std::string&&, std::string&&, c10::ArrayRef, c10::ArrayRef)",
+                "torch::autograd::_wrap_outputs",
+                "torch::autograd::Node::retains_grad_hooks", // IntFunctionPreHookMap cannot be instantiated because of static_assert errors due to unique_ptr copying
+                "c10::impl::GPUTrace", "torch::jit::IterableTree",
+                "c10::cuda::CaptureStatus",
+
+                // Ignore for now; takes a callback.
+                "c10::IValue::repr", "c10::IValue::visit",
+                "at::TensorIteratorBase::foreach_reduced_elt",
+                "at::TensorIteratorBase::parallel_reduce",
+                "at::TensorIteratorBase::serial_for_each",
+                "at::TensorIteratorBase::for_each",
+
+                "torch::autograd::get_current_graph_task_exec_info" // Would need to map GraphTask, NodeExec... too much burden
+
+        ).skip())
+        ;
-  public static class GraphFunctionCreator extends FunctionPointer {
-      static { Loader.load(); }
-      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-      public GraphFunctionCreator(Pointer p) { super(p); }
-      protected GraphFunctionCreator() { allocate(); }
-      private native void allocate();
-      public native void call(@ByRef @Cast("torch::jit::GraphFunction*") Pointer m);
-  }
+
+        //// Prevent the compiler from croaking about "non-standard-layout type".
+        /* We cannot add an Info annotation("@NoOffset") on the class, or the parser will also add the annotation on method arguments,
+           which is not supported and makes no sense.
+           We need either to put an annotation info on each member, or javaName("@NoOffset XXX") on the whole class.
+           If an info exists on the member, it must not have annotations, or they will be replaced.
+         */
+        for (String n : new String[]{
+                "c10::DDPLoggingData::strs_map",
+                "c10::DDPLoggingData::ints_map",
+                "torch::jit::Object::Property::setter_func",
+                "torch::jit::Object::Property::getter_func",
+                "torch::jit::Object::Property::name",
+                "torch::jit::Named::name",
+                "torch::jit::Named::value",
+                "torch::jit::detail::SlotCursor::i_",
+                "torch::jit::detail::SlotCursor::module_",
+                "torch::jit::StackEntry::filename",
+                "torch::jit::StackEntry::range",
+                "torch::jit::Call::fn_name",
+                "torch::jit::Call::caller_range"
+        }) {
+            Info i = infoMap.getFirst(n, false);
+            if (i == null) {
+                i = new Info(n);
+                infoMap.put(i);
+            }
+            i.annotations("@NoOffset");
+        }
-  public static class ModuleFunction extends FunctionPointer {
-      static { Loader.load(); }
-      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-      public ModuleFunction(Pointer p) { super(p); }
-      protected ModuleFunction() { allocate(); }
-      private native void allocate();
-      public native void call(@ByRef @Cast("torch::jit::Module*") Pointer m);
-  }
-  public static class IValueCallback extends FunctionPointer {
-      static { Loader.load(); }
-      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-      public IValueCallback(Pointer p) { super(p); }
-      protected IValueCallback() { allocate(); }
-      private native void allocate();
-      public native void call(@ByRef @Cast("std::vector*") Pointer v);
-  }
+
+        //// Classes whose parents are useless for us
+        infoMap.put(new Info(
+                "caffe2::TypeIdentifier", "c10::util::crc64_t", "c10::util::type_index"
+        ).base("Pointer"));
+
+
+        //// PyTorch "internal only"
+        infoMap.put(new Info(
+                "at::RecordFunction::_setAsync", "at::RecordFunction::_setStaticRuntimeOutVariant",
+                "at::Tensor(c10::TensorImpl*)", // Really at::Tensor(c10::intrusive_ptr but the Parser gets the wrong fullname
+                "at::Tensor::_set_fw_grad", "at::Tensor::_fw_grad",
+                "at::TensorBase(c10::intrusive_ptr",
+                "at::TensorBase::_set_fw_grad", "at::TensorBase::_fw_grad",
+                "at::TensorImpl::_set_fw_grad", "at::TensorImpl::_fw_grad",
+                "c10::KernelFunction::_equalsBoxedAndUnboxed",
+                "c10::RegisterOperators::Options::catchAllKernel()",
+                "c10::RegisterOperators::Options::kernel(c10::DispatchKey)",
+                "c10::RegisterOperators::Options::schema(c10::FunctionSchema&&)",
+                "c10::RegisterOperators::op(c10::FunctionSchema,c10::Options&&)",
+                "c10::ThreadLocalDebugInfo::_forceCurrentDebugInfo",
+                "c10::impl::_force_tls_local_dispatch_key_set",
+                "torch::jit::CompilationUnit::_clear_python_cu",
+                "torch::jit::GraphFunction::_set_initial_executor_execution_mode", "torch::jit::GraphFunction::_set_ignore_amp"
+        ).skip());
+
+
+        //// Deprecated
+        infoMap.put(new Info(
+                "c10::detail::deprecated_AT_ERROR",
+                "c10::detail::deprecated_AT_ASSERT",
+                "c10::detail::deprecated_AT_ASSERTM",
+                "detail::deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF",
+                "detail::deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX",
+                "detail::scalar_type(const at::DeprecatedTypeProperties&)",
+                "at::DeprecatedTypeProperties",
+                "c10::Scalar::isIntegral()",
+                "c10::isIntegralType(c10::ScalarType)",
+                "at::Tensor::type()",
+                "at::Tensor::is_variable()"
+        ).skip());
+
+        //// Functions returning an object by value whose copy constructor was deleted. Is there any way to get around this?
+        infoMap.put(new Info(
+                "c10::RegisterOperators::Options", // All methods of Options return Options&&
+                "c10::impl::device_guard_impl_registry",
+                "torch::autograd::graph_task_id",
+                "c10::getLessThanComparator", "c10::getGreaterThanComparator"
+        ).skip());
+
+
+        //// Deleted operator=.
Any way to skip setter only ? + infoMap.put(new Info("at::native::RNNDescriptor::dropout_desc_").skip()); + + + //// ifdef'd out + infoMap.put(new Info( + "c10_complex_math::_detail::sqrt", + "c10_complex_math::_detail::acos", + "c10::__ldg", + "c10::impl::raw_local_dispatch_key_set" // non-windows, non-android only + ).skip()); + + + //// Function not compiling because failing some static_assert + infoMap.put(new Info("at::SplitUntil32Bit::iterator::vec", + //"std::vector >::put(std::vector >)", + "c10::ArrayRef::equals", + "c10::ArrayRef::equals", + "c10::ArrayRef::equals", + "c10::ArrayRef::vec", + "c10::ArrayRef::equals", + "c10::ArrayRef::equals", + "c10::ArrayRef::equals", + "c10::ArrayRef::equals", + "c10::ArrayRef >::equals" + ).skip()); + + + //// Avoiding name clashes or making them more explicit. + infoMap.put(new Info("c10::ComplexType::get").javaNames("getComplexTypePtr")) + .put(new Info("c10::FloatType::get").javaNames("getFloatTypePtr")) + .put(new Info("c10::IntType::get").javaNames("getIntTypePtr")) + .put(new Info("c10::NumberType::get").javaNames("getNumberIntTypePtr")) + .put(new Info("c10::GeneratorImpl::clone").javaNames("clonePtr")) + .put(new Info("c10::IValue::toString", "at::IValue::toString").javaNames("toConstantString")) + .put(new Info("torch::jit::TreeView::get").skip()) // Prevents override of get() in subclasses, and tree is available as tree() anyway + .put(new Info("torch::cuda::device_count").javaNames("cuda_device_count")) + .put(new Info("torch::cuda::is_available").javaNames("cuda_is_available")) + .put(new Info("torch::cuda::manual_seed").javaNames("cuda_manual_seed")) + .put(new Info("torch::cuda::manual_seed_all").javaNames("cuda_manual_seed_all")) + .put(new Info("torch::cuda::synchronize").javaNames("cuda_synchronize")) + .put(new Info("torch::jit::Const").pointerTypes("ConstExpr")) + .put(new Info("torch::jit::Node").pointerTypes("JitNode")) + .put(new Info("torch::jit::Module").pointerTypes("JitModule")) + .put(new Info("torch::jit::Object").pointerTypes("JitObject")) + .put(new Info("torch::jit::String").pointerTypes("JitString")) + ; - public static class CustomFormatter extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public CustomFormatter(Pointer p) { super(p); } - protected CustomFormatter() { allocate(); } - private native void allocate(); - public native boolean call(@ByRef @Cast("std::ostream*") Pointer o, @ByRef @Cast("const c10::IValue*") Pointer v); - } - public static class IValueVisitor extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public IValueVisitor(Pointer p) { super(p); } - protected IValueVisitor() { allocate(); } - private native void allocate(); - public native boolean call(@ByRef @Cast("const c10::IValue*") Pointer v); - } + //// Instantiation of templated functions. + for (String op : new String[]{"exp", "log", "log10", "log2", "sqrt", "pow", "sin", "cos", "tan", + "asin", "acos", "atan", "sinh", "cosh", "tanh", "asinh", "acosh", "atanh", "log1p" }) { + infoMap.put(new Info("c10_complex_math::" + op + "").javaNames(op)) + .put(new Info("c10_complex_math::" + op + "").javaNames(op)) + .put(new Info("at::" + op).javaNames(op)); // Needed because "ATen/ops/*.h" + // are parsed after complex_math.h and Parser would set the qualified names to the first + // matching cppName it finds in infoMap. 
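+            // A hedged illustration of the outcome (method names assumed from the
+            // javaNames() calls above): each template instance surfaces as a plain
+            // Java overload of the op in the generated global class, e.g.
+            //   FloatComplex y = torch.exp(x);    // x a FloatComplex
+            //   DoubleComplex z = torch.sqrt(w);  // w a DoubleComplex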
+ } + infoMap.put(new Info("ska::detailv3::log2").javaNames("log2")) // Same reason + .put(new Info("c10_complex_math::pow(c10::complex&, c10::complex&)").javaText( + "@Namespace(\"c10_complex_math\") public static native @ByVal @Name(\"pow\") DoubleComplex pow(@Const @ByRef DoubleComplex x, @Const @ByRef FloatComplex y);\n" + + "@Namespace(\"c10_complex_math\") public static native @ByVal @Name(\"pow\") DoubleComplex pow(@Const @ByRef FloatComplex x, @Const @ByRef DoubleComplex y);\n" + )) + .put(new Info("c10_complex_math::pow(c10::complex&, U&)").javaText( + "@Namespace(\"c10_complex_math\") public static native @ByVal @Name(\"pow\") DoubleComplex pow(@Const @ByRef DoubleComplex x, @Const @ByRef float y);\n" + + "@Namespace(\"c10_complex_math\") public static native @ByVal @Name(\"pow\") DoubleComplex pow(@Const @ByRef FloatComplex x, @Const @ByRef double y);\n" + )) + .put(new Info("c10_complex_math::pow(T&, c10::complex&)").javaText( + "@Namespace(\"c10_complex_math\") public static native @ByVal @Name(\"pow\") DoubleComplex pow(@Const @ByRef double x, @Const @ByRef FloatComplex y);\n" + + "@Namespace(\"c10_complex_math\") public static native @ByVal @Name(\"pow\") DoubleComplex pow(@Const @ByRef float x, @Const @ByRef DoubleComplex y);\n" + )) + .put(new Info("c10::util::get_type_index").javaNames("get_type_index_string")) + .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_char")) + .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_short")) + .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_int")) + .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_long")) + .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_float")) + .put(new Info("at::TensorBase::data_ptr").javaNames("data_ptr_double")) + .put(new Info("at::Tensor::item").javaNames("item_char")) + .put(new Info("at::Tensor::item").javaNames("item_short")) + .put(new Info("at::Tensor::item").javaNames("item_int")) + .put(new Info("at::Tensor::item").javaNames("item_long")) + .put(new Info("at::Tensor::item").javaNames("item_float")) + .put(new Info("at::Tensor::item").javaNames("item_double")) + .put(new Info("at::make_generator").javaText( + "@Namespace(\"at\") public static native @ByVal @Name(\"make_generator\") Generator make_generator_cpu();\n" + + "@Namespace(\"at\") public static native @ByVal @Name(\"make_generator\") Generator make_generator_cpu(@Cast(\"uint64_t&&\") long seed_in);" + )) + ; - public static class Reader extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
-     */
-    public Reader(Pointer p) { super(p); }
-    protected Reader() { allocate(); }
-    private native void allocate();
-    public native @Cast("size_t") long call(@Cast("char*") Pointer data_start, @Cast("size_t") long data_len);
-  }
+        for (String[] t : new String[][]{
+                {"c10::qint8", "qint8"},
+                {"c10::quint8", "quint8"},
+                {"c10::qint32", "qint32"},
+                {"c10::quint4x2", "quint4x2"},
+                {"c10::quint2x4", "quint2x4"},
+                {"int8_t", "byte"},
+                {"int16_t", "short"},
+                {"int", "int"},
+                {"int64_t", "long"},
+                {"at::Half", "Half"},
+                {"float", "float"},
+                {"double", "double"},
+                {"c10::complex", "ComplexFloat"},
+                {"c10::complex", "ComplexDouble"},
+                {"bool", "boolean"},
+                {"at::BFloat16", "BFloat16"}
+        }) {
+            infoMap.put(new Info("c10::fetch_and_cast<" + t[0] + ">").javaNames("fetch_and_cast_to_" + t[1]))
+                    .put(new Info("c10::cast_and_store<" + t[0] + ">").javaNames("cast_and_store_from_" + t[1]));
+        }
-  public static class RecordReader extends FunctionPointer {
-      static { Loader.load(); }
-      /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
-      public RecordReader(Pointer p) { super(p); }
-      protected RecordReader() { allocate(); }
-      private native void allocate();
-      public native @StdMove @ByVal @Cast("c10::DataPtr*") Pointer call(@ByRef @Cast("const std::string*") Pointer s);
-  }
+
+        // Registries.
+        // Skipped for now: too much burden with variadic args and creator function pointers.
+        // We cannot map ThreadPoolRegistry because it takes 3 arguments in the variadic Args Registry template arguments.
+
+        /*
+        .put(new Info("c10::Registry,at::MPSHooksArgs>").pointerTypes("MPSHooksRegistry"))
+        .put(new Info("c10::Registry,at::MPSHooksArgs>::Create").javaText(
+            "public native @UniquePtr MPSHooksInterface Create(@StdString BytePointer key, @ByRef MPSHooksArgs args);\n" +
+            "public native @UniquePtr MPSHooksInterface Create(@StdString String key, @ByRef MPSHooksArgs args);")) // Handle pack extension
+
+        .put(new Info("c10::Registry,int,int,bool>",
+                      "c10::Registry,int>" // JavaCPP doesn't really support variadic template arguments.
+                      // We must provide this truncated list of arguments so that Context.qualify can find this Info. Issue #81.
+ ).pointerTypes("ThreadPoolRegistry").javaNames("ThreadPoolRegistry")) + .put(new Info("c10::Registry,int>::Create").javaText( + "public native @SharedPtr TaskThreadPoolBase Create(@StdString BytePointer key, int i1, int i2, boolean b);\n" + + "public native @SharedPtr TaskThreadPoolBase Create(@StdString String key, int i1, int i2, boolean b);")) // Handle pack extension + .put(new Info("std::shared_ptr").pointerTypes("TaskThreadPoolBase").annotations("@SharedPtr")) + + .put(new Info("c10::Registry,at::CUDAHooksArgs>").pointerTypes("CUDAHooksRegistry")) + .put(new Info("c10::Registry,at::CUDAHooksArgs>::Create").javaText( + "public native @UniquePtr CUDAHooksInterface Create(@StdString BytePointer key, @ByRef CUDAHooksArgs args);\n" + + "public native @UniquePtr CUDAHooksInterface Create(@StdString String key, @ByRef CUDAHooksArgs args);")) // Handle pack extension + + .put(new Info("c10::Registry,at::HIPHooksArgs>").pointerTypes("HIPHooksRegistry")) + .put(new Info("c10::Registry,at::HIPHooksArgs>::Create").javaText( + "public native @UniquePtr HIPHooksInterface Create(@StdString BytePointer key, @ByRef HIPHooksArgs args);\n" + + "public native @UniquePtr HIPHooksInterface Create(@StdString String key, @ByRef HIPHooksArgs args);")) // Handle pack extension + + .put(new Info("c10::Registry,at::ORTHooksArgs>").pointerTypes("ORTHooksRegistry")) + .put(new Info("c10::Registry,at::ORTHooksArgs>::Create").javaText( + "public native @UniquePtr ORTHooksInterface Create(@StdString BytePointer key, @ByRef ORTHooksArgs args);\n" + + "public native @UniquePtr ORTHooksInterface Create(@StdString String key, @ByRef ORTHooksArgs args);")) // Handle pack extension + + , + .put(new Info("c10::Registry,at::ORTHooksArgs>::Creator", + "c10::Registry,at::CUDAHooksArgs>::Creator", + "c10::Registry,at::HIPHooksArgs>::Creator", + "c10::Registry,at::MPSHooksArgs>::Creator").pointerTypes("Pointer")) + */ + + infoMap.put(new Info("c10::ThreadPoolRegistry()", + "c10::CUDAHooksRegistry()").skip()); + + + /* Classes that are not part of API (no TORCH_API nor C10_API) and are not argument nor return type of API methods. + * Consider manual exclusion of all at::meta, at::native and caffe2 namespaces (but TypeMeta, that should + * be moved to c10 one day). 
*/ + infoMap.put(new Info( + "ModuleHolderIndicator", + "at::ObserverContext", + "at::Range", + "at::StepCallbacks::StartEndPair", + "at::TensorBase::unsafe_borrow_t", + //"at::mt19937_data_pod", + //"at::mt19937_engine", + "at::tracer::impl::NoTracerDispatchMode", + "c10::_CopyBytesFunctionRegisterer", + "c10::AlignedCharArray<1,Size>::", + "c10::AlignedCharArray<2,Size>::", + "c10::AlignedCharArray<4,Size>::", + "c10::AlignedCharArray<8,Size>::", + "c10::Capsule", + "c10::DeviceGuard", + "c10::DispatchTraceNestingGuard", + "c10::Dispatcher::OperatorDef", + "c10::DynamicType", + "c10::DynamicType::", + "c10::DynamicType::Arguments", + "c10::DynamicType::LabeledDynamicType", + "c10::DynamicTypeTrait", + "c10::Event", + "c10::ExclusivelyOwned::", + "c10::IListRef::Payload", + "c10::IListRefIterator::Payload", + "c10::IValue::CompAliasedIValues", + "c10::IValue::HashAliasedIValue", + "c10::IValue::Payload", + "c10::IValue::Payload::TriviallyCopyablePayload", + "c10::IValue::Payload::TriviallyCopyablePayload::", + "c10::MultiStreamGuard", + "c10::OpTableOffsetAndMask", + "c10::OperatorNameView", + "c10::OptionalStreamGuard", + "c10::PyHandleCache", + "c10::RegisterOperators::Options::KernelRegistrationConfig", + "c10::Registry,int>", + "c10::Registry,at::CUDAHooksArgs>", + "c10::Registry,at::HIPHooksArgs>", + "c10::Registry,at::MPSHooksArgs>", + "c10::Registry,at::ORTHooksArgs>", + "c10::Scalar::v_t", + "c10::StreamGuard", + "c10::Type::SingletonOrSharedTypePtr::Repr", + "c10::Type::SingletonOrSharedTypePtr::Repr::RawRepr", + "c10::Type::SingletonOrSharedTypePtr::Repr::SingletonRepr", + "c10::Type::SingletonOrSharedTypePtr::SharedPtrWrapper", + "c10::Type::SingletonOrSharedTypePtr::Repr", + "c10::Type::SingletonOrSharedTypePtr::Repr::RawRepr", + "c10::Type::SingletonOrSharedTypePtr::Repr::SingletonRepr", + "c10::Type::SingletonOrSharedTypePtr::SharedPtrWrapper", + "c10::TypeFactoryBase", + "c10::VarType", + "c10::VariableVersion::VersionCounter", + "c10::arrayref_optional_base::storage", + "c10::arrayref_optional_base::storage::raw", + "c10::bad_optional_access", + "c10::basic_string_view::charIsEqual_", + "c10::basic_string_view::charIsNotEqual_", + "c10::basic_string_view::stringViewContainsChar_", + "c10::basic_string_view::stringViewDoesNotContainChar_", + "c10::basic_string_view", + "c10::basic_string_view::charIsEqual_", + "c10::basic_string_view::charIsNotEqual_", + "c10::basic_string_view::stringViewContainsChar_", + "c10::basic_string_view::stringViewDoesNotContainChar_", + "c10::detail::DictKeyEqualTo", + "c10::detail::DictKeyHash", + "c10::detail::ListElementFrom", + "c10::detail::ListImpl", + "c10::detail::LoadImpl", + "c10::detail::_guarded_unsigned_long_unique_dummy", + "c10::detail::_str_wrapper", + "c10::detail::getTypePtr_", + "c10::detail::infer_schema::createReturns", + "c10::detail::infer_schema::createReturns,void>", // Parsing error ? + "c10::detail::ivalue_to_const_ref_overload_return", + "c10::either::", + "c10::either", + "c10::either::", + "c10::guts::conjunction", + "c10::guts::detail::DummyClassForToString", + "c10::guts::detail::__array_traits<_Tp,0>::_Type", + "c10::guts::detail::_identity", + "c10::guts::detail::_if_constexpr", + "c10::guts::disjunction", + "c10::guts::typelist::concat<>", + "c10::guts::typelist::concat >", + "c10::guts::typelist::concat ><>", // Parsing error ? 
+ "c10::guts::typelist::reverse >", + "c10::guts::typelist::concat,c10::guts::typelist::typelist<> >", + "c10::guts::typelist::concat,c10::guts::typelist::typelist<> ><>", // Persing error ? + "c10::hash >::tuple_hash<0> >", + "c10::hash >::tuple_hash >", + "c10::impl::AnnotatedSchema", + "c10::impl::ListElementConstReferenceTraits >", + "c10::impl::SizesAndStrides::", + "c10::impl::VirtualGuardImpl", + "c10::impl::decay_if_not_tensor", + "c10::impl::is_mutable_tensor_ref", + "c10::in_place_t", + "c10::ivalue::ComplexHolder", + "c10::ivalue::Object", + "c10::ivalue::StreamData3Holder", + "c10::ivalue::TupleElements::", + "c10::ivalue::TupleTypeFactory", + "c10::once_flag", + "c10::sha1", + "c10::static_cast_with_inter_type,c10::BFloat16>", + "c10::trivial_init_t", + "caffe2::detail::_Uninitialized", + "ska::detailv3::sherwood_v3_entry::", + "ska::detailv3::sherwood_v3_table::convertible_to_iterator", + "ska::fibonacci_hash_policy", + "ska::power_of_two_hash_policy", + "ska::prime_number_hash_policy", + "ska_ordered::detailv3::sherwood_v3_entry::", + "ska_ordered::detailv3::sherwood_v3_table::convertible_to_iterator", + "ska_ordered::order_preserving_flat_hash_map::convertible_to_value", + "std::hash", + "std::hash", + "std::hash", + "std::hash", + "torch::Indices", + "torch::MakeIndices<0>", + "torch::NoInferSchemaTag", + "torch::all_of", + "torch::any_of<>", + "torch::autograd::CppFunctionSingleTensorPreHook", + "torch::autograd::CppFunctionTensorPreHook", + "torch::autograd::GraphTask", + "torch::autograd::GraphTask::ExecInfo", // returned by an API function get_current_graph_task_exec_info, finally excluding get_current_graph_task_exec_info + "torch::autograd::GraphTask::ExecInfo::Capture", + "torch::autograd::GraphTask::ExecInfo::Capture::GradCaptureHook", + "torch::autograd::GraphTaskGuard", + "torch::autograd::InputBuffer", + "torch::autograd::InputMetadata", + "torch::autograd::NodeGuard", + "torch::autograd::TraceableFunction", + "torch::data::DataLoaderBase::Job", + "torch::data::DataLoaderBase::QuitWorker", + "torch::data::DataLoaderBase::Result", + "torch::data::DataLoaderBase::Sequenced", + "torch::data::FullDataLoaderOptions", + "torch::data::Iterator > > >", + "torch::data::Iterator > >", + "torch::data::WorkerException", + "torch::data::datasets::TensorDataset", + "torch::data::datasets::detail::BatchDataBuffer::UnwrappedBatchData", + "torch::detail::ClassNotSelected", + "torch::detail::TorchLibraryInit", + "torch::enumtype::_compute_enum_name", + "torch::jit::CompleteArgumentInfo", + "torch::jit::CompleteArgumentInfoPOD", + "torch::jit::CompleteArgumentSpec", + "torch::jit::IRAttributeError", + "torch::jit::InterpreterContinuation", + "torch::jit::InterpreterState", + "torch::jit::Operator::C10Operator", + "torch::jit::Operator::JitOnlyOperator", + "torch::jit::Operator::UnparsedFunctionSchema", + "torch::jit::OwnedSourceRange", + "torch::jit::RecursiveMethodCallError", + "torch::jit::StrongFunctionPtr", + "torch::jit::Suspend", + "torch::jit::TokenTrie", + "torch::jit::TaggedRange", + "torch::jit::WithCurrentScope", + "torch::jit::WithInsertPoint", + "torch::jit::variable_tensor_list", + "torch::nn::AnyModuleHolder::CheckedGetter", + "torch::nn::AnyModuleHolder::InvokeForward", + "torch::nn::AnyModulePlaceholder", + "torch::nn::AnyValue::Placeholder", + "torch::nn::NamedAnyModule", + "torch::nn::functions::CrossMapLRN2d", + "torch::profiler::impl::HashCombine", + + "torch::autograd::_jvp_fn_t", "torch::autograd::profiler::post_process_t" + + ).skip()) + ; - public static 
class Writer extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Writer(Pointer p) { super(p); } - protected Writer() { allocate(); } - private native void allocate(); - public native void call(@Cast("const char*") Pointer data_start, @Cast("size_t") long data_len); + //// Functions not part of the API + //// TORCH_API and the like are not honored on Linux but are on Windows. We must skip all public + //// functions not marked as part of API. + infoMap.put(new Info( + "c10::detail::makeBaseType", + "torch::detail::constructSchemaOrName", + "at::operator <<(std::ostream&, at::Range&)", + "caffe2::serialize::detail::getPadding", + "at::assert_no_partial_overlap(c10::TensorImpl*, c10::TensorImpl*)", + "at::TensorIteratorBase::apply_perm_and_mul", + "c10::ivalue::ConstantString::operator <<", // No idea why these are not exported. TODO: dig + "c10::ivalue::Future::operator <<", + "c10::ivalue::EnumHolder::operator <<", + "c10::ivalue::Await::operator <<", + "c10::ivalue::EnumHolder::operator ==", // The friend operator is truly a member of c10::ivalue and not c10::ivalue::EnumHolder + "c10::ivalue::EnumHolder::is", // Calls ==, which is not exported + "c10::ivalue::EnumHolder::unqualifiedClassName", + "c10::operator <<(std::ostream&, c10::SourceLocation&)", + "torch::jit::Code::operator <<(std::ostream&, const torch::jit::Code&)", // The friend operator is truly a member of torch::jit and not torch::jit::Code + "torch::jit::ClassDef::create", + "torch::profiler::impl::getNvtxStr", + "torch::autograd::add_node_to_current_graph_task_exec_info" + ).skip()); + + //// Aliases necessary because of Parser limited namespace resolution + infoMap.put(new Info("at::Device", "torch::Device")) + .put(new Info("torch::Tensor", "at::Tensor")) + + + //// Classes kept but passed as generic pointer + .put(new Info("c10::intrusive_ptr_target", "c10::nullopt", "c10::nullopt_t", "c10::string_view", "c10::impl::PyObjectSlot", + "_object", + "PyObject", "std::function", "THPObjectPtr", "pyobj_list", "std::chrono::milliseconds", "std::exception_ptr", "std::type_info", + "std::pair", "std::stack >", "torch::autograd::utils::DelayWarningHandler", + "std::is_same,torch::detail::pack >", "at::cuda::NVRTC", "at::RecordFunctionCallback", "at::StepCallbacks", "THCState", "THHState", + "torch::autograd::ViewInfo", "torch::jit::InlinedCallStackPtr", "InlinedCallStackPtr", "torch::jit::ScopePtr", "torch::jit::BackendDebugInfoRecorder", + "torch::detail::TensorDataContainer", "at::ArrayRef", + "std::shared_ptr", "caffe2::serialize::PyTorchStreamWriter", + "c10::detail::DictImpl::dict_map_type::iterator", + "std::iterator >", + "c10::optional", "c10::optional", "c10::optional >", "c10::optional", + "c10::intrusive_ptr", "c10::intrusive_ptr", + "c10::intrusive_ptr", "c10::ArrayRef >", + "torch::jit::DetachedBuffer::UniqueDetachedBuffer", "c10::optional", + "c10::optional::ListOfOptionalElements>", "c10::optional::ListOfOptionalElements>", + "c10::optional", "c10::optional >", + "c10::optional", + "c10::optional", + "std::tuple >,c10::optional >,c10::optional >", + "c10::optional >", "c10::optional >", + "std::vector >", "std::reference_wrapper", + "std::enable_shared_from_this", + "std::enable_shared_from_this", + "std::enable_shared_from_this", + "std::enable_shared_from_this", + "std::enable_shared_from_this", + "std::enable_shared_from_this", + "std::enable_shared_from_this", + "std::enable_shared_from_this", + 
"std::enable_shared_from_this", + "std::enable_shared_from_this", + "std::enable_shared_from_this", "std::enable_shared_from_this", + "std::enable_shared_from_this", "std::enable_shared_from_this", + "std::enable_shared_from_this", "std::enable_shared_from_this" + ).pointerTypes("Pointer").cast()); + + + ///// Special cases needing javaText + infoMap + .put(new Info("at::Tensor::toString", "at::TensorBase::toString", "torch::Tensor::toString", "torch::TensorBase::toString", "torch::jit::Graph::toString").javaText("public native @StdString String toString();")) + .put(new Info("torch::jit::tracer::pauseTracing()").javaText("@Namespace(\"torch::jit::tracer\") public static native @ByVal @Cast(\"std::function*\") Pointer pauseTracing();")) + .put(new Info("torch::jit::ProfileOp::getCallback()", "torch::jit::ProfileIValueOp::getCallback()").javaText( + "public native @ByVal @Cast(\"std::function&)>*\") Pointer getCallback();")) + .put(new Info("torch::optim::AdamOptions::betas", "torch::optim::AdamWOptions::betas").javaText( + "public native @Cast(\"std::tuple*\") @ByRef @NoException DoublePointer betas();")) + .put(new Info("torch::optim::Adagrad::step", "torch::optim::Adam::step", "torch::optim::AdamW::step", + "torch::optim::LBFG::step", "torch::optim::RMSprop::step", "torch::optim::SGD::step").javaText( + "public native @ByVal Tensor step(@ByVal(nullValue = \"torch::optim::Optimizer::LossClosure(nullptr)\") LossClosure closure);\n" + + "public native @ByVal Tensor step();\n")); + + + // Abstract classes because parent class is abstract, and not detected as such by Parser. + String[] abstracts = new String[]{ + "torch::nn::InstanceNormImpl<1,torch::nn::InstanceNorm1dImpl>", + "torch::nn::InstanceNormImpl<2,torch::nn::InstanceNorm2dImpl>", + "torch::nn::InstanceNormImpl<3,torch::nn::InstanceNorm3dImpl>", + "torch::nn::InstanceNormImpl<3,torch::nn::InstanceNorm3dImpl>", + "torch::nn::BatchNormImplBase<1,torch::nn::BatchNorm1dImpl>", + "torch::nn::BatchNormImplBase<2,torch::nn::BatchNorm2dImpl>", + "torch::nn::BatchNormImplBase<3,torch::nn::BatchNorm3dImpl>" + }; + for (String a : abstracts) { + infoMap.getFirst(a, false).purify(); + } + infoMap.put(new Info("at::TensorIteratorBase").purify()); + + + //// Callback functions + infoMap + .put(new Info("c10::DeleterFnPtr").cast().valueTypes("PointerConsumer", "Pointer", "long")) + .put(new Info("torch::Deleter", "std::function").pointerTypes("PointerConsumer", "@Cast(\"void(*)(void*)\") Pointer", "@Cast(\"void(*)(void*)\") long")) + .put(new Info("std::function").pointerTypes("Func")) + .put(new Info("std::function").pointerTypes("StringSupplier")) + .put(new Info("std::function").pointerTypes("StringConsumer")) + .put(new Info("std::function", + "std::function").pointerTypes("DDPLogger")) + .put(new Info("std::function").pointerTypes("TypeMapper")) + .put(new Info("std::function").pointerTypes("ValueMapper")) + .put(new Info("std::function").pointerTypes("GraphFunctionCreator")) + .put(new Info("torch::nn::Module::ModuleApplyFunction", "torch::nn::Module::ConstModuleApplyFunction", "std::function", "std::function").pointerTypes("ModuleApplyFunction")) + .put(new Info("std::function", "std::function").pointerTypes("JitModuleApplyFunction")) + .put(new Info("torch::nn::NamedModuleApplyFunction", "torch::nn::ConstNamedModuleApplyFunction", "std::function", "std::function").pointerTypes("NamedModuleApplyFunction")) + .put(new Info("torch::nn::ModulePointerApplyFunction", "std::function&)>").pointerTypes("SharedModuleApplyFunction")) + .put(new 
Info("torch::nn::Module::NamedModulePointerApplyFunction", "std::function&)>").pointerTypes("NamedSharedModuleApplyFunction")) + .put(new Info("std::function&)>").pointerTypes("IValueVectorConsumer")) + .put(new Info("std::function").pointerTypes("IValueSupplier")) + .put(new Info("std::function").pointerTypes("Reader")) + .put(new Info("std::function").pointerTypes("ArchiveWriter")) + .put(new Info("std::function").pointerTypes("PickleWriter")) + .put(new Info("std::function&)>").pointerTypes("TypeRenamer")) + .put(new Info("std::function").pointerTypes("TensorIdGetter")) + .put(new Info("std::function").pointerTypes("SizeTSupplier")) + .put(new Info("std::function").pointerTypes("LossClosure")) + .put(new Info("std::function", + "torch::nn::TripletMarginWithDistanceLossOptions::distance_function_t", + "torch::nn::functional::TripletMarginWithDistanceLossFuncOptions::distance_function_t").pointerTypes("DistanceFunction")) + .put(new Info("std::function)>").pointerTypes("Pointer")) + + .put(new Info("at::TensorBase::register_hook >").javaNames("register_hook")) + .put(new Info("at::TensorBase::register_hook >").javaNames("register_hook")) + .put(new Info("std::function").pointerTypes("VoidTensorHook")) + .put(new Info("std::function").pointerTypes("TensorTensorHook")) + .put(new Info("std::function").pointerTypes("TensorMapper")) + .put(new Info("at::TensorBase::hook_return_void_t > ", + "at::TensorBase::hook_return_void_t >").valueTypes("int")) + ; } - public static class TensorIdGetter extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TensorIdGetter(Pointer p) { super(p); } - protected TensorIdGetter() { allocate(); } - private native void allocate(); - public native @ByVal @Cast("std::string*") Pointer call(@ByRef @Cast("const at::Tensor*") Pointer t); + private static String template(String t, String... args) { + StringBuilder sb = new StringBuilder(t); + sb.append('<'); + for (int i = 0; i < args.length; i++) { + if (i > 0) sb.append(','); + sb.append(args[i]); + } + if (args[args.length - 1].endsWith(">")) sb.append(' '); + sb.append('>'); + return sb.toString(); } - public static class TypeRenamer extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TypeRenamer(Pointer p) { super(p); } - protected TypeRenamer() { allocate(); } - private native void allocate(); - public native @ByVal @Cast("c10::QualifiedName*") Pointer call(@ByRef @Cast("const c10::ClassTypePtr*") Pointer t); - } + static class ArrayInfo { + String baseJavaName; + String[] elementTypes = new String[0]; + String[] otherCppNames = new String[0]; + String itPointerType; + String[] otherPointerTypes = new String[0]; + String elementValueType; + + ArrayInfo(String b) { + baseJavaName = b; + itPointerType = "@ByPtr " + b; + elementValueType = "@ByVal " + b; + } - public static class ReadFunction extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ReadFunction(Pointer p) { super(p); } - protected ReadFunction() { allocate(); } - private native void allocate(); - public native @Cast("size_t") long call(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long nbytes); - } + ArrayInfo elementTypes(String... 
vt) { + elementTypes = vt; + return this; + } - public static class WriteFunction extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public WriteFunction(Pointer p) { super(p); } - protected WriteFunction() { allocate(); } - private native void allocate(); - public native @Cast("size_t") long call(@Const Pointer buf, @Cast("size_t") long nbytes); - } + ArrayInfo otherCppNames(String... jn) { + otherCppNames = jn; + return this; + } - public static class SizeFunction extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public SizeFunction(Pointer p) { super(p); } - protected SizeFunction() { allocate(); } - private native void allocate(); - public native @Cast("size_t") long call(); - } + ArrayInfo itPointerType(String p) { + itPointerType = p; + return this; + } - public static class LossClosure extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public LossClosure(Pointer p) { super(p); } - protected LossClosure() { allocate(); } - private native void allocate(); - public native @ByVal @Cast("at::Tensor*") Pointer call(); - } + ArrayInfo elementValueType(String t) { + elementValueType = t; + return this; + } + + ArrayInfo otherPointerTypes(String... p) { + otherPointerTypes = p; + return this; + } - public static class DistanceFunction extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public DistanceFunction(Pointer p) { super(p); } - protected DistanceFunction() { allocate(); } - private native void allocate(); - public native @ByVal @Cast("at::Tensor*") Pointer call(@ByRef @Cast("const at::Tensor*") Pointer t1, @ByRef @Cast("const at::Tensor*") Pointer t2); + void mapArrayRef(InfoMap infoMap) { + String[] cppNames = new String[elementTypes.length * 3 + otherCppNames.length]; + String[] cppNamesIterator = new String[cppNames.length * 2]; + String[] cppNamesRIterator = new String[cppNames.length * 2]; + int n = 0; + for (String vt : elementTypes) { + String mainName = cppNames[n++] = template("c10::ArrayRef", vt); + cppNames[n++] = template("at::ArrayRef", vt); + cppNames[n++] = template("torch::ArrayRef", vt); + infoMap.put(new Info(mainName + "(const " + vt + "&)").skip())// Causes SIGSEGV since it just make a pointer to the value + .put(new Info(mainName + "(" + vt + "&)").skip());// Parser removes const for non-investigated reasons for some elementTypes (eg Block*) + // With the following info, any operator<< + //infoMap.put(new Info(template("c10::operator <<", vt)).javaNames("shiftLeft")); + } + for (String on : otherCppNames) + cppNames[n++] = on; + n = 0; + for (String cn : cppNames) { + cppNamesIterator[n++] = cn + "::iterator"; + cppNamesIterator[n++] = cn + "::const_iterator"; + /* + infoMap.put(new Info(cn + "::at").javaText( + //"@Index(function = \"at\") public native @Const " + elementValueType + "get(@Cast(\"size_t\") long i);\n" + + "@ValueSetter @Index(function = \"at\") public native " + baseJavaName + "ArrayRef put(@Cast(\"size_t\") long i, " + elementValueType + " value);" + )); + */ + } + n = 0; + for (String cn : cppNames) { + cppNamesRIterator[n++] = cn + "::reverse_iterator"; + cppNamesRIterator[n++] = cn + "::const_reverse_iterator"; + } + String[] pt = new String[otherPointerTypes.length + 1]; + pt[0] = baseJavaName + "ArrayRef"; + 
System.arraycopy(otherPointerTypes, 0, pt, 1, otherPointerTypes.length); + Info info = new Info(cppNames).pointerTypes(pt); + if (baseJavaName.contains("@Cast")) info.cast(); + infoMap.put(info); + info = new Info(cppNamesIterator).valueTypes("@Const " + itPointerType); + infoMap.put(info); + infoMap.put(new Info(cppNamesRIterator).skip()); + + // Add templated constructor taking a std::vector, if the vector class has been mapped. + // Relies on the fact that std::vector info are created before. + Info vectorInfo = infoMap.getFirst(template("std::vector", elementTypes[0]), false); + if (vectorInfo != null && !elementTypes[0].equals("bool")) + infoMap.put(new Info(template(cppNames[0], template("std::allocator", elementTypes[0])) + "(" + elementTypes[0] + "*)") + .javaText( + "public " + baseJavaName + "ArrayRef(@ByRef " + baseJavaName + "Vector vec) { super((Pointer)null); allocate(vec); }\n" + + "private native void allocate(@ByRef " + baseJavaName + "Vector vec);")); + } + + void mapList(InfoMap infoMap) { + String t = elementTypes[0]; + infoMap.put(new Info(template("c10::List", t)).pointerTypes(baseJavaName + "List")) + .put(new Info( + template("c10::impl::ListElementReference", t, "typename c10::detail::ListImpl::list_type::iterator"), + template("c10::impl::ListElementReference", t, "c10::detail::ListImpl::list_type::iterator"), + template("c10::impl::ListElementReference", t, template("std::vector", t) + "::iterator")) + .pointerTypes(baseJavaName + "ElementReference")) + .put(new Info(template("c10::impl::ListIterator", t, "typename c10::detail::ListImpl::list_type::iterator"), + template("c10::impl::ListIterator", t, "c10::detail::ListImpl::list_type::iterator")) + .pointerTypes(baseJavaName + "ListIterator")) + .put(new Info(template("c10::List", t) + "::value_type").valueTypes(elementValueType)) + .put(new Info(template("operator std::conditional_t", template("std::is_reference", template("c10::detail::ivalue_to_const_ref_overload_return", t) + "::type") + "::value", "const " + t + "&", t) + "()") + .javaNames("get" + baseJavaName)) + .put(new Info(template("c10::List", t) + "::size_type").valueTypes("long")) + .put(new Info( + template("c10::impl::ListElementReference", t, "typename c10::detail::ListImpl::list_type::iterator") + "::swap", + template("c10::impl::ListElementReference", t, "c10::detail::ListImpl::list_type::iterator") + "::swap", + template("c10::impl::ListElementReference", t, template("std::vector", t) + "::iterator") + "::swap") + .skip()); + infoMap.put(new Info(template("c10::List", t) + "::operator []").skip()) // Returns an internal_reference_type by value, which is a ListElementReference, whose copy constructor is disabled. + .put(new Info( + template("c10::impl::ListIterator", t, "c10::detail::ListImpl::list_type::iterator") + "::operator []", + template("c10::impl::ListIterator", t, "c10::detail::ListImpl::list_type::iterator") + "::operator *") + .skip()) // Returns ListElementReference by value, and ListElementReference has copy constructor disabled. 
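+                // Because ListElementReference cannot be copied, element access is meant to go
+                // through the value-returning accessors instead. A hedged usage sketch, assuming
+                // the "Tensor" instantiation of this helper (TensorList / TensorElementReference):
+                //   TensorList list = ...;
+                //   Tensor first = list.get(0);  // element returned by value, no reference escapes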
+ .put(new Info(template("std::conditional_t", template("std::is_reference", template("c10::detail::ivalue_to_const_ref_overload_return", t) + "::type") + "::value", "const " + t + "&", t)) + .pointerTypes(itPointerType).valueTypes(elementValueType)) + + .put(new Info(template("c10::impl::swap", t, "typename c10::detail::ListImpl::list_type::iterator")).javaNames("swap").friendly()); + + // Some List constructors are only for specific instances + if (baseJavaName.equals("Generic")) + infoMap.put(new Info( + template("c10::List", t) + "(" + template("std::initializer_list", t) + ")", + template("c10::List", t) + "(" + template("c10::ArrayRef", t) + ")", + template("c10::List", t) + "()" + ).skip()); + else if (!baseJavaName.equals("Future")) + infoMap.put(new Info(template("c10::List", t) + "(c10::TypePtr)").skip()); + } } - public static class TypeParser extends FunctionPointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TypeParser(Pointer p) { super(p); } - protected TypeParser() { allocate(); } - private native void allocate(); - public native @ByVal @Cast("c10::TypePtr*") Pointer call(@ByRef @Cast("const std::string*") Pointer s); + private static class PointerInfo { + String javaBaseName; + String javaName; + final String[] argumentNames; + String[] otherCppNames = new String[0]; + + PointerInfo(String... an) { + argumentNames = an; + javaBaseName = an[0].substring(an[0].lastIndexOf(':') + 1); + } + + PointerInfo otherCppNames(String... n) { + otherCppNames = n; + return this; + } + + PointerInfo javaBaseName(String jn) { + javaBaseName = jn; + return this; + } + + PointerInfo javaName(String jn) { + javaName = jn; + return this; + } } @Namespace("std") public static native @MemberGetter @ByRef @Cast("std::istream*") Pointer cin(); + @Namespace("std") public static native @MemberGetter @ByRef @Cast("std::ostream*") Pointer cout(); + @Namespace("std") public static native @MemberGetter @ByRef @Cast("std::ostream*") Pointer cerr(); + @Namespace("std") public static native @MemberGetter @ByRef @Cast("std::ostream*") Pointer clog(); + } diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java new file mode 100644 index 00000000000..68a574141cd --- /dev/null +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2023 Hervé Guillemet + * + * Licensed either under the Apache License, Version 2.0, or (at your option) + * under the terms of the GNU General Public License as published by + * the Free Software Foundation (subject to the "Classpath" exception), + * either version 2, or any later version (collectively, the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.gnu.org/licenses/ + * http://www.gnu.org/software/classpath/license.html + * + * or as provided in the LICENSE.txt file that accompanied this code. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.bytedeco.pytorch.presets;
+
+import org.bytedeco.javacpp.ClassProperties;
+import org.bytedeco.javacpp.LoadEnabled;
+import org.bytedeco.javacpp.Loader;
+import org.bytedeco.javacpp.annotation.*;
+import org.bytedeco.javacpp.tools.Info;
+import org.bytedeco.javacpp.tools.InfoMap;
+import org.bytedeco.javacpp.tools.InfoMapper;
+
+import java.util.List;
+
+/**
+ * @author Hervé Guillemet
+ */
+@Properties(
+    inherit = torch.class,
+    value = {
+        @Platform(
+            extension = "-gpu",
+            include = {
+                "ATen/cudnn/Descriptors.h",
+                "ATen/cudnn/Types.h",
+                "c10/cuda/CUDAGuard.h",
+
+                // For inclusion in JNI only, not parsed
+                "ATen/cuda/CUDAGeneratorImpl.h",
+            },
+            link = { "cudart", "cusparse" },
+            linkpath = {
+                "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/lib/x64/",
+                "/usr/local/cuda-12.1/lib64/",
+                "/usr/local/cuda/lib64/",
+                "/usr/lib64/"
+            }
+        ),
+    },
+    target = "org.bytedeco.pytorch.cuda",
+    global = "org.bytedeco.pytorch.global.torch_cuda"
+)
+public class torch_cuda implements LoadEnabled, InfoMapper {
+
+    @Override
+    public void init(ClassProperties properties) {
+        String extension = properties.getProperty("platform.extension");
+        if (extension != null && extension.endsWith("-gpu"))
+            torch.initIncludes(getClass(), properties);
+    }
+
+    @Override
+    public void map(InfoMap infoMap) {
+
+        torch.sharedMap(infoMap);
+
+        infoMap
+            .put(new Info("basic/containers").cppTypes("c10::optional"))
+
+            .put(new Info().enumerate().friendly())
+            .put(new Info().javaText("import org.bytedeco.pytorch.*;"))
+            .put(new Info().javaText("import org.bytedeco.pytorch.Error;"))
+            .put(new Info().javaText("import org.bytedeco.pytorch.global.torch.DeviceType;"))
+            .put(new Info().javaText("import org.bytedeco.pytorch.global.torch.ScalarType;"))
+            .put(new Info().javaText("import org.bytedeco.pytorch.global.torch.MemoryFormat;"))
+            .put(new Info().javaText("import org.bytedeco.pytorch.Allocator;"))
+
+            .put(new Info().javaText(
+                "@Namespace(\"at\") public static native @ByVal @Name(\"make_generator<at::CUDAGeneratorImpl>\") Generator make_generator_cuda();\n" +
+                "@Namespace(\"at\") public static native @ByVal @Name(\"make_generator<at::CUDAGeneratorImpl>\") Generator make_generator_cuda(@Cast(\"int8_t&&\") byte device_index);\n"
+            ))
+
+            .put(new Info(
+                "at::CUDAGeneratorImpl"
+            ).skip())
+
+            .put(new Info("c10::optional<c10::cuda::CUDAStream>").pointerTypes("CUDAStreamOptional").define())
+
+            //// Already defined in main torch
+            .put(new Info("c10::Stream").pointerTypes("Stream"))
+            .put(new Info("c10::optional<c10::Stream>").pointerTypes("StreamOptional"))
+            .put(new Info("c10::optional<c10::Device>").pointerTypes("DeviceOptional"))
+            .put(new Info("c10::Device").pointerTypes("Device"))
+            .put(new Info("c10::impl::PyInterpreter").pointerTypes("PyInterpreter"))
+            .put(new Info("std::tuple<int,int>").pointerTypes("T_IntInt_T"))
+            .put(new Info("c10::optional<int8_t>").pointerTypes("ByteOptional"))
+            .put(new Info("c10::IntArrayRef", "at::IntArrayRef").pointerTypes("LongArrayRef"))
+
+            .put(new Info("c10::DeviceIndex").valueTypes("byte"))
+            .put(new Info("c10::StreamId").valueTypes("long"))
+            .put(new Info("c10::cuda::CaptureStatus").valueTypes("int").cast().skip()) // Enum doesn't parse
+            .put(new Info("std::pair<std::vector<c10::cuda::DeviceAssertionsData>,std::vector<c10::cuda::CUDAKernelLaunchInfo> >").pointerTypes("DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair").define())
+            .put(new Info("std::vector<c10::cuda::DeviceAssertionsData>").pointerTypes("DeviceAssertionsDataVector").define())
+            .put(new Info("std::vector<c10::cuda::CUDAKernelLaunchInfo>").pointerTypes("CUDAKernelLaunchInfoVector").define())
+            .put(new Info("std::atomic<const c10::impl::PyInterpreter*>").cast().pointerTypes("PyInterpreter"))
+            .put(new Info("c10::CuDNNError").purify())
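+            // The "Already defined in main torch" entries above bind C++ types that
+            // both presets encounter (Stream, Device, PyInterpreter, ...) to the Java
+            // classes the main torch preset already generates, so this preset reuses
+            // them instead of emitting duplicates under org.bytedeco.pytorch.cuda.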
.put(new Info("c10::impl::GPUTrace::gpuTraceState").skip()) + .put(new Info("at::native::RNNDescriptor::dropout_desc_").skip()) + .put(new Info("at::native::operator <<(std::ostream&, at::native::TensorDescriptor&)", + "at::native::operator <<(std::ostream&, at::native::FilterDescriptor&)", + "at::native::cudnnTypeToString", "at::native::getCudnnDataType", "at::native::cudnn_version", + "c10::cuda::c10_retrieve_device_side_assertion_info").skip()) + + .put(new Info( + "at::native::Descriptor", + "at::native::Descriptor", + "at::native::Descriptor", + "at::native::Descriptor", + "at::native::Descriptor", + "at::native::Descriptor", + "at::native::Descriptor", + "at::native::Descriptor", + + "std::hash" + + ).cast().pointerTypes("Pointer")) + + //// CUDA types + .put(new Info( // Struct + "cudaDeviceProp" + ).pointerTypes("Pointer")) + .put(new Info( // Pointers to opaque structs + "cudaStream_t", "cusparseHandle_t", "cublasHandle_t", "cusolverDnHandle_t", "cudnnHandle_t" + ).valueTypes("Pointer").cast()) + .put(new Info( // Enums + "cudnnActivationMode_t", "cudnnLossNormalizationMode_t", "cudnnRNNInputMode_t", + "cudnnDirectionMode_t", "cudnnRNNMode_t", "cudaStreamCaptureMode", "cudnnDataType_t", "cudnnNanPropagation_t", + "cusparseStatus_t", "cusolverStatus_t", "cudnnRNNAlgo_t", "cudnnNanPropagation_t", "cublasStatus_t" + ).valueTypes("int").cast()) + ; + + new torch.ArrayInfo("CUDAStream").elementTypes("c10::cuda::CUDAStream").mapArrayRef(infoMap); + } +} diff --git a/pytorch/src/main/java9/module-info.java b/pytorch/src/main/java9/module-info.java index 29db155ffed..933f01a8cbe 100644 --- a/pytorch/src/main/java9/module-info.java +++ b/pytorch/src/main/java9/module-info.java @@ -3,5 +3,7 @@ requires transitive org.bytedeco.openblas; exports org.bytedeco.pytorch.global; exports org.bytedeco.pytorch.presets; + exports org.bytedeco.pytorch.functions; + exports org.bytedeco.pytorch.cuda; exports org.bytedeco.pytorch; } diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h new file mode 100644 index 00000000000..254741f1d0d --- /dev/null +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h @@ -0,0 +1,26 @@ +#include "c10/util/ArrayRef.h" + +// Included by +// ATen/cudnn/Descriptors.h +// ATen/cudnn/Types.h +// c10/cuda/CUDAGuard.h +#include "c10/cuda/CUDAStream.h" +#include "ATen/cuda/CUDAContext.h" +#include "c10/core/impl/GPUTrace.h" +#include "c10/cuda/CUDADeviceAssertionHost.h" +#include "c10/cuda/CUDAMacros.h" +#include "c10/cuda/impl/cuda_cmake_macros.h" +#include "c10/cuda/CUDAGraphsC10Utils.h" +#include "ATen/cuda/Exceptions.h" +#include "ATen/cudnn/cudnn-wrapper.h" +#include "ATen/cudnn/Utils.h" +#include "ATen/cudnn/Handle.h" +#include "ATen/cuda/ATenCUDAGeneral.h" +// #include "c10/cuda/CUDAFunctions.h", // Parsing error +// #include "c10/cuda/CUDAException.h", // Parsing error +// #include "c10/cuda/CUDAMiscFunctions.h", // Parsing error +// #include "c10/cuda/CUDACachingAllocator.h", // If map needed, rename global symbols + +#include "ATen/cudnn/Descriptors.h" +#include "ATen/cudnn/Types.h" +#include "c10/cuda/CUDAGuard.h" diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h new file mode 100644 index 00000000000..01cddd92ffe --- /dev/null +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_include.h @@ -0,0 +1,1415 
+// All files included by #include <torch/all.h>
+// as listed by g++ -M torch/csrc/api/include/torch/all.h (or -H)
+// Excluding:
+// - the ones that fill the at::meta, at::native and at::_ops namespaces
+//   (ATen/ops/*_native.h, ATen/ops/*_meta.h, ATen/ops/*_ops.h)
+// - ATen/ops/_*
+// - and some exceptions commented below
+#include "torch/csrc/utils/python_stub.h"
+#include "c10/macros/cmake_macros.h"
+#include "c10/macros/Export.h"
+#include "torch/csrc/Export.h"
+#include "c10/macros/Macros.h" // import c10 into at and others
+#include "c10/core/DeviceType.h"
+#include "c10/util/Deprecated.h"
+// #include "c10/util/string_utils.h" // Android only
+// #include "c10/util/C++17.h"
+#include "c10/util/reverse_iterator.h"
+#include "c10/util/string_view.h"
+#include "c10/util/StringUtil.h"
+#include "c10/util/in_place.h"
+// #include "c10/util/variant.h" // Not parseable and incompatible with declaring c10::variant as basic container
+#include "c10/util/Exception.h"
+#include "c10/core/Device.h"
+#include "c10/core/DispatchKey.h"
+#include "c10/util/Array.h"
+#include "c10/util/TypeTraits.h"
+#include "c10/util/TypeList.h"
+// #include "c10/util/Metaprogramming.h" // Not parseable
+// #include "c10/util/llvmMathExtras.h" // Not parseable
+#include "c10/core/DispatchKeySet.h"
+#include "c10/core/Backend.h"
+#include "c10/core/Layout.h"
+#include "c10/util/AlignOf.h"
+#include "c10/util/SmallVector.h"
+#include "c10/util/ArrayRef.h"
+#include "c10/core/MemoryFormat.h"
+#include "c10/core/QScheme.h"
+#include "c10/core/Stream.h"
+#include "c10/core/OptionalRef.h"
+#include "c10/util/BFloat16.h"
+#include "c10/util/BFloat16-inl.h"
+#include "c10/util/TypeSafeSignMath.h"
+#include "c10/util/complex_math.h"
+#include "c10/util/Half.h" // Moved before complex.h because it overrides complex
+#include "c10/util/Half-inl.h"
+#include "c10/util/complex_utils.h"
+#include "c10/util/complex.h"
+#include "c10/util/qint32.h"
+#include "c10/util/qint8.h"
+#include "c10/util/quint2x4.h"
+#include "c10/util/quint4x2.h"
+#include "c10/util/quint8.h"
+#include "c10/core/ScalarType.h"
+#include "c10/util/ExclusivelyOwned.h"
+#include "c10/util/MaybeOwned.h"
+// #include "c10/util/intrusive_ptr.h" Moved below
+#include "c10/core/SymNodeImpl.h"
+#include "c10/core/SymFloat.h"
+#include "c10/core/SymBool.h"
+#include "c10/core/SymInt.h"
+#include "c10/util/TypeCast.h"
+#include "c10/core/Scalar.h"
+// #include "c10/util/Optional.h" // Incompatible with declaration of c10::optional as basic container
+#include "c10/util/Backtrace.h"
+#include "c10/util/IdWrapper.h"
+#include "c10/util/Type.h"
+#include "c10/util/ConstexprCrc.h"
+#include "c10/util/TypeIndex.h"
+#include "c10/util/flat_hash_map.h"
+#include "c10/util/irange.h"
+#include "c10/util/typeid.h"
+#include "c10/core/ScalarTypeToTypeMeta.h"
+#include "c10/util/ThreadLocalDebugInfo.h"
+#include "c10/util/UniqueVoidPtr.h"
+#include "c10/core/Allocator.h"
+#include "c10/core/StorageImpl.h"
+#include "c10/core/Storage.h"
+#include "c10/core/CopyBytes.h"
+#include "c10/core/AutogradState.h"
+#include "c10/core/GradMode.h"
+#include "c10/util/Registry.h"
+#include "c10/util/Flags.h"
+#include "c10/core/impl/LocalDispatchKeySet.h"
+#include "c10/core/InferenceMode.h"
+#include "c10/core/SymIntArrayRef.h"
+#include "c10/core/DefaultDtype.h"
+#include "c10/core/TensorOptions.h"
+#include "c10/core/WrapDimMinimal.h"
+#include "c10/core/impl/HermeticPyObjectTLS.h"
+#include "c10/core/impl/PyInterpreter.h"
+#include "c10/core/impl/PyObjectSlot.h"
+#include
"c10/core/impl/SizesAndStrides.h" +#include "c10/util/DimVector.h" +// #include "c10/util/logging_is_google_glog.h" // Not parseable +// #include "c10/util/logging_is_not_google_glog.h" // Not parseable +#include "c10/util/Logging.h" +#include "c10/util/accumulate.h" +#include "c10/util/safe_numerics.h" +#include "c10/core/TensorImpl.h" +#include "c10/core/UndefinedTensorImpl.h" +// #include "c10/util/OptionalArrayRef.h" // Not compatible with basic container. Are we concerned by https://github.com/pytorch/pytorch/issues/63645 ? +#include "ATen/core/CheckMemoryFormat.h" +// #include "ATen/core/DeprecatedTypePropertiesRegistry.h" // Deprecated +#include "c10/core/GeneratorImpl.h" +#include "ATen/core/Generator.h" +// #include "ATen/core/DeprecatedTypeProperties.h" // Deprecated +#include "ATen/core/symbol.h" +#include "ATen/core/Dimname.h" +#include "ATen/core/NamedTensor.h" +#include "ATen/core/QuantizerBase.h" +#include "ATen/core/TensorAccessor.h" +#include "c10/util/ExclusivelyOwnedTensorTraits.h" +#include "ATen/core/TensorBase.h" +// #include "ATen/core/ATen_fwd.h" // Only forward declarations + conflict with basic containers +#include "ATen/MethodOperators.h" +#include "ATen/core/TensorBody.h" +#include "ATen/core/Tensor.h" +#include "ATen/Tensor.h" +#include "torch/csrc/autograd/function_hook.h" +#include "torch/csrc/autograd/cpp_hook.h" +#include "c10/util/hash.h" +#include "torch/csrc/autograd/edge.h" +#include "torch/csrc/autograd/forward_grad.h" +#include "ATen/NamedTensor.h" +#include "ATen/core/ivalue_to.h" +#include "ATen/core/qualified_name.h" +#include "ATen/core/type_ptr.h" +#include "ATen/core/jit_type_base.h" +#include "ATen/core/DimVector.h" +#include "ATen/core/blob.h" +#include "ATen/core/custom_class.h" +#include "ATen/core/dynamic_type.h" +#include "ATen/core/type_factory.h" +#include "c10/util/order_preserving_flat_hash_map.h" +#include "ATen/core/Dict_inl.h" +#include "ATen/core/Dict.h" +#include "ATen/core/functional.h" +#include "ATen/core/jit_type.h" +#include "ATen/core/rref_interface.h" +#include "c10/core/impl/DeviceGuardImplInterface.h" +#include "c10/core/impl/VirtualGuardImpl.h" +#include "c10/core/impl/InlineDeviceGuard.h" +#include "c10/core/DeviceGuard.h" +#include "c10/core/impl/InlineEvent.h" +#include "c10/core/Event.h" +#include "c10/core/impl/InlineStreamGuard.h" +#include "c10/core/StreamGuard.h" +#include "c10/util/FunctionRef.h" +#include "c10/util/intrusive_ptr.h" +#include "ATen/core/ivalue_inl.h" +#include "ATen/core/ivalue.h" +#include "ATen/core/List_inl.h" +#include "ATen/core/List.h" +#include "ATen/core/IListRef_inl.h" +#include "ATen/core/IListRef.h" +#include "ATen/WrapDimUtils.h" +#include "ATen/TensorNames.h" +// #include "ATen/WrapDimUtilsMulti.h" // Windows-specific +#include "ATen/NamedTensorUtils.h" +#include "torch/csrc/autograd/variable.h" +#include "torch/csrc/autograd/autograd.h" +#include "ATen/core/alias_info.h" +#include "ATen/core/operator_name.h" +#include "ATen/core/dispatch/OperatorOptions.h" +#include "ATen/core/function_schema.h" +#include "ATen/core/function_schema_inl.h" +#include "ATen/core/op_registration/infer_schema.h" +#include "ATen/record_function.h" +#include "ATen/core/op_registration/op_allowlist.h" +#include "c10/util/either.h" +#include "torch/csrc/jit/frontend/function_schema_parser.h" +#include "c10/core/CompileTimeFunctionPointer.h" +#include "ATen/core/boxing/OperatorKernel.h" +#include "ATen/core/boxing/BoxedKernel.h" +#include "ATen/core/boxing/BoxedKernel_impl.h" +#include "ATen/core/stack.h" 
+#include "ATen/core/boxing/impl/boxing.h" +#include "ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h" +#include "ATen/core/boxing/impl/WrapFunctionIntoFunctor.h" +#include "ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h" +#include "ATen/core/boxing/KernelFunction.h" +#include "ATen/core/boxing/KernelFunction_impl.h" +#include "ATen/core/dispatch/CppSignature.h" +#include "ATen/core/dispatch/RegistrationHandleRAII.h" +#include "ATen/core/ATenOpList.h" +#include "ATen/core/op_registration/op_registration.h" +#include "ATen/core/enum_tag.h" +#include "ATen/core/function.h" +// #include "ATen/core/builtin_function.h" // Not in API +#include "ATen/core/class_type.h" +// #include "torch/custom_class_detail.h" // Not in API +// #include "torch/custom_class.h" // Not in API +#include "torch/library.h" +#include "torch/csrc/autograd/autograd_not_implemented_fallback.h" +#include "torch/csrc/autograd/anomaly_mode.h" +#include "ATen/core/grad_mode.h" +#include "torch/csrc/autograd/grad_mode.h" +#include "ATen/FuncTorchTLS.h" +#include "c10/core/SafePyObject.h" +#include "ATen/PythonTorchFunctionTLS.h" +#include "ATen/SavedTensorHooks.h" +#include "ATen/ThreadLocalPythonObjects.h" +#include "c10/core/impl/PythonDispatcherTLS.h" +#include "c10/core/impl/TorchDispatchModeTLS.h" +#include "ATen/ThreadLocalState.h" +#include "c10/util/ThreadLocal.h" +#include "torch/csrc/autograd/input_buffer.h" +#include "torch/csrc/autograd/utils/warnings.h" +#include "torch/csrc/autograd/graph_task.h" +#include "ATen/core/MT19937RNGEngine.h" +#include "ATen/CPUGeneratorImpl.h" +#include "ATen/LinalgBackend.h" +#include "ATen/core/ATenGeneral.h" +#include "ATen/core/LegacyTypeDispatch.h" +#include "ATen/detail/CUDAHooksInterface.h" +#include "ATen/detail/HIPHooksInterface.h" +#include "ATen/detail/MPSHooksInterface.h" +#include "ATen/detail/ORTHooksInterface.h" +#include "c10/core/QEngine.h" +#include "c10/util/CallOnce.h" +#include "c10/util/env.h" +#include "ATen/Context.h" +#include "ATen/DeviceGuard.h" +#include "ATen/DimVector.h" +#include "ATen/EmptyTensor.h" +#include "ATen/TensorGeometry.h" +#include "ATen/core/Formatting.h" +#include "ATen/Formatting.h" +#include "ATen/Utils.h" +#include "ATen/TensorUtils.h" +#include "ATen/TracerMode.h" +#include "ATen/core/Reduction.h" +#include "ATen/ops/abs.h" +#include "ATen/ops/absolute.h" +#include "ATen/ops/acos.h" +#include "ATen/ops/acosh.h" +#include "ATen/ops/adaptive_avg_pool1d.h" +#include "ATen/ops/adaptive_avg_pool2d.h" +#include "ATen/ops/adaptive_avg_pool3d.h" +#include "ATen/ops/adaptive_avg_pool3d_backward.h" +#include "ATen/ops/adaptive_max_pool1d.h" +#include "ATen/ops/adaptive_max_pool2d.h" +#include "ATen/ops/adaptive_max_pool2d_backward.h" +#include "ATen/ops/adaptive_max_pool3d.h" +#include "ATen/ops/adaptive_max_pool3d_backward.h" +#include "ATen/ops/add.h" +#include "ATen/ops/addbmm.h" +#include "ATen/ops/addcdiv.h" +#include "ATen/ops/addcmul.h" +#include "ATen/ops/addmm.h" +#include "ATen/ops/addmv.h" +#include "ATen/ops/addr.h" +#include "ATen/ops/adjoint.h" +#include "ATen/ops/affine_grid_generator.h" +#include "ATen/ops/affine_grid_generator_backward.h" +#include "ATen/ops/alias.h" +#include "ATen/ops/alias_copy.h" +#include "ATen/ops/align_as.h" +#include "ATen/ops/align_tensors.h" +#include "ATen/ops/align_to.h" +#include "ATen/ops/all.h" +#include "ATen/ops/allclose.h" +#include "ATen/ops/alpha_dropout.h" +#include "ATen/ops/amax.h" +#include "ATen/ops/amin.h" +#include "ATen/ops/aminmax.h" +#include "ATen/ops/and.h" 
+#include "ATen/ops/angle.h" +#include "ATen/ops/any.h" +#include "ATen/ops/arange.h" +#include "ATen/ops/arccos.h" +#include "ATen/ops/arccosh.h" +#include "ATen/ops/arcsin.h" +#include "ATen/ops/arcsinh.h" +#include "ATen/ops/arctan.h" +#include "ATen/ops/arctan2.h" +#include "ATen/ops/arctanh.h" +#include "ATen/ops/argmax.h" +#include "ATen/ops/argmin.h" +#include "ATen/ops/argsort.h" +#include "ATen/ops/argwhere.h" +#include "ATen/ops/as_strided.h" +#include "ATen/ops/as_strided_copy.h" +#include "ATen/ops/as_strided_scatter.h" +#include "ATen/ops/asin.h" +#include "ATen/ops/asinh.h" +#include "ATen/ops/atan.h" +#include "ATen/ops/atan2.h" +#include "ATen/ops/atanh.h" +#include "ATen/ops/atleast_1d.h" +#include "ATen/ops/atleast_2d.h" +#include "ATen/ops/atleast_3d.h" +#include "ATen/ops/avg_pool1d.h" +#include "ATen/ops/avg_pool2d.h" +#include "ATen/ops/avg_pool2d_backward.h" +#include "ATen/ops/avg_pool3d.h" +#include "ATen/ops/avg_pool3d_backward.h" +#include "ATen/ops/baddbmm.h" +#include "ATen/ops/bartlett_window.h" +#include "ATen/ops/batch_norm.h" +#include "ATen/ops/batch_norm_backward_elemt.h" +#include "ATen/ops/batch_norm_backward_reduce.h" +#include "ATen/ops/batch_norm_elemt.h" +#include "ATen/ops/batch_norm_gather_stats.h" +#include "ATen/ops/batch_norm_gather_stats_with_counts.h" +#include "ATen/ops/batch_norm_stats.h" +#include "ATen/ops/batch_norm_update_stats.h" +#include "ATen/ops/bernoulli.h" +#include "ATen/ops/bilinear.h" +#include "ATen/ops/binary_cross_entropy.h" +#include "ATen/ops/binary_cross_entropy_backward.h" +#include "ATen/ops/binary_cross_entropy_with_logits.h" +#include "ATen/ops/bincount.h" +#include "ATen/ops/binomial.h" +#include "ATen/ops/bitwise_and.h" +#include "ATen/ops/bitwise_left_shift.h" +#include "ATen/ops/bitwise_not.h" +#include "ATen/ops/bitwise_or.h" +#include "ATen/ops/bitwise_right_shift.h" +#include "ATen/ops/bitwise_xor.h" +#include "ATen/ops/blackman_window.h" +#include "ATen/ops/block_diag.h" +#include "ATen/ops/bmm.h" +#include "ATen/ops/broadcast_tensors.h" +#include "ATen/ops/broadcast_to.h" +#include "ATen/ops/bucketize.h" +#include "ATen/ops/can_cast.h" +#include "ATen/ops/cartesian_prod.h" +#include "ATen/ops/cat.h" +#include "ATen/ops/cauchy.h" +#include "ATen/ops/ccol_indices.h" +#include "ATen/ops/ccol_indices_copy.h" +#include "ATen/ops/cdist.h" +#include "ATen/ops/ceil.h" +#include "ATen/ops/celu.h" +#include "ATen/ops/chain_matmul.h" +#include "ATen/ops/chalf.h" +#include "ATen/ops/channel_shuffle.h" +#include "ATen/ops/cholesky.h" +#include "ATen/ops/cholesky_inverse.h" +#include "ATen/ops/cholesky_solve.h" +#include "ATen/ops/choose_qparams_optimized.h" +#include "ATen/ops/chunk.h" +#include "ATen/ops/clamp.h" +#include "ATen/ops/clamp_max.h" +#include "ATen/ops/clamp_min.h" +#include "ATen/ops/clip.h" +#include "ATen/ops/clone.h" +#include "ATen/ops/coalesce.h" +#include "ATen/ops/col2im.h" +#include "ATen/ops/col_indices.h" +#include "ATen/ops/col_indices_copy.h" +#include "ATen/ops/column_stack.h" +#include "ATen/ops/combinations.h" +#include "ATen/ops/complex.h" +#include "ATen/ops/concat.h" +#include "ATen/ops/concatenate.h" +#include "ATen/ops/conj.h" +#include "ATen/ops/conj_physical.h" +#include "ATen/ops/constant_pad_nd.h" +#include "ATen/ops/contiguous.h" +#include "ATen/ops/conv1d.h" +#include "ATen/ops/conv2d.h" +#include "ATen/ops/conv3d.h" +#include "ATen/ops/conv_depthwise3d.h" +#include "ATen/ops/conv_tbc.h" +#include "ATen/ops/conv_tbc_backward.h" +#include "ATen/ops/conv_transpose1d.h" +#include 
"ATen/ops/conv_transpose2d.h" +#include "ATen/ops/conv_transpose3d.h" +#include "ATen/ops/convolution.h" +#include "ATen/ops/convolution_backward.h" +#include "ATen/ops/convolution_backward_overrideable.h" +#include "ATen/ops/convolution_overrideable.h" +#include "ATen/ops/copy.h" +#include "ATen/ops/copy_sparse_to_sparse.h" +#include "ATen/ops/copysign.h" +#include "ATen/ops/corrcoef.h" +#include "ATen/ops/cos.h" +#include "ATen/ops/cosh.h" +#include "ATen/ops/cosine_embedding_loss.h" +#include "ATen/ops/cosine_similarity.h" +#include "ATen/ops/count_nonzero.h" +#include "ATen/ops/cov.h" +#include "ATen/ops/cross.h" +#include "ATen/ops/cross_entropy_loss.h" +#include "ATen/ops/crow_indices.h" +#include "ATen/ops/crow_indices_copy.h" +#include "ATen/ops/ctc_loss.h" +#include "ATen/ops/cudnn_affine_grid_generator.h" +#include "ATen/ops/cudnn_affine_grid_generator_backward.h" +#include "ATen/ops/cudnn_batch_norm.h" +#include "ATen/ops/cudnn_batch_norm_backward.h" +#include "ATen/ops/cudnn_convolution.h" +#include "ATen/ops/cudnn_convolution_add_relu.h" +#include "ATen/ops/cudnn_convolution_relu.h" +#include "ATen/ops/cudnn_convolution_transpose.h" +#include "ATen/ops/cudnn_grid_sampler.h" +#include "ATen/ops/cudnn_grid_sampler_backward.h" +#include "ATen/ops/cudnn_is_acceptable.h" +#include "ATen/ops/cummax.h" +#include "ATen/ops/cummaxmin_backward.h" +#include "ATen/ops/cummin.h" +#include "ATen/ops/cumprod.h" +#include "ATen/ops/cumprod_backward.h" +#include "ATen/ops/cumsum.h" +#include "ATen/ops/cumulative_trapezoid.h" +#include "ATen/ops/data.h" +#include "ATen/ops/deg2rad.h" +#include "ATen/ops/dense_dim.h" +#include "ATen/ops/dequantize.h" +#include "ATen/ops/det.h" +#include "ATen/ops/detach.h" +#include "ATen/ops/detach_copy.h" +#include "ATen/ops/diag.h" +#include "ATen/ops/diag_embed.h" +#include "ATen/ops/diagflat.h" +#include "ATen/ops/diagonal.h" +#include "ATen/ops/diagonal_backward.h" +#include "ATen/ops/diagonal_copy.h" +#include "ATen/ops/diagonal_scatter.h" +#include "ATen/ops/diff.h" +#include "ATen/ops/digamma.h" +#include "ATen/ops/dist.h" +#include "ATen/ops/div.h" +#include "ATen/ops/divide.h" +#include "ATen/ops/dot.h" +#include "ATen/ops/dropout.h" +#include "ATen/ops/dsplit.h" +#include "ATen/ops/dstack.h" +#include "ATen/ops/einsum.h" +#include "ATen/ops/elu.h" +#include "ATen/ops/elu_backward.h" +#include "ATen/ops/embedding.h" +#include "ATen/ops/embedding_backward.h" +#include "ATen/ops/embedding_bag.h" +#include "ATen/ops/embedding_dense_backward.h" +#include "ATen/ops/embedding_renorm.h" +#include "ATen/ops/embedding_sparse_backward.h" +#include "ATen/ops/empty.h" +#include "ATen/ops/empty_like.h" +#include "ATen/ops/empty_quantized.h" +#include "ATen/ops/empty_strided.h" +#include "ATen/ops/eq.h" +#include "ATen/ops/equal.h" +#include "ATen/ops/erf.h" +#include "ATen/ops/erfc.h" +#include "ATen/ops/erfinv.h" +#include "ATen/ops/exp.h" +#include "ATen/ops/exp2.h" +#include "ATen/ops/expand.h" +#include "ATen/ops/expand_as.h" +#include "ATen/ops/expand_copy.h" +#include "ATen/ops/expm1.h" +#include "ATen/ops/exponential.h" +#include "ATen/ops/eye.h" +#include "ATen/ops/fake_quantize_per_channel_affine.h" +#include "ATen/ops/fake_quantize_per_channel_affine_cachemask.h" +#include "ATen/ops/fake_quantize_per_channel_affine_cachemask_backward.h" +#include "ATen/ops/fake_quantize_per_tensor_affine.h" +#include "ATen/ops/fake_quantize_per_tensor_affine_cachemask.h" +#include "ATen/ops/fake_quantize_per_tensor_affine_cachemask_backward.h" +#include 
"ATen/ops/fbgemm_linear_fp16_weight.h" +#include "ATen/ops/fbgemm_linear_fp16_weight_fp32_activation.h" +#include "ATen/ops/fbgemm_linear_int8_weight.h" +#include "ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h" +#include "ATen/ops/fbgemm_linear_quantize_weight.h" +#include "ATen/ops/fbgemm_pack_gemm_matrix_fp16.h" +#include "ATen/ops/fbgemm_pack_quantized_matrix.h" +#include "ATen/ops/feature_alpha_dropout.h" +#include "ATen/ops/feature_dropout.h" +#include "ATen/ops/fft_fft.h" +#include "ATen/ops/fft_fft2.h" +#include "ATen/ops/fft_fftfreq.h" +#include "ATen/ops/fft_fftn.h" +#include "ATen/ops/fft_fftshift.h" +#include "ATen/ops/fft_hfft.h" +#include "ATen/ops/fft_hfft2.h" +#include "ATen/ops/fft_hfftn.h" +#include "ATen/ops/fft_ifft.h" +#include "ATen/ops/fft_ifft2.h" +#include "ATen/ops/fft_ifftn.h" +#include "ATen/ops/fft_ifftshift.h" +#include "ATen/ops/fft_ihfft.h" +#include "ATen/ops/fft_ihfft2.h" +#include "ATen/ops/fft_ihfftn.h" +#include "ATen/ops/fft_irfft.h" +#include "ATen/ops/fft_irfft2.h" +#include "ATen/ops/fft_irfftn.h" +#include "ATen/ops/fft_rfft.h" +#include "ATen/ops/fft_rfft2.h" +#include "ATen/ops/fft_rfftfreq.h" +#include "ATen/ops/fft_rfftn.h" +#include "ATen/ops/fill.h" +#include "ATen/ops/fill_diagonal.h" +#include "ATen/ops/fix.h" +#include "ATen/ops/flatten.h" +#include "ATen/ops/flatten_dense_tensors.h" +#include "ATen/ops/flip.h" +#include "ATen/ops/fliplr.h" +#include "ATen/ops/flipud.h" +#include "ATen/ops/float_power.h" +#include "ATen/ops/floor.h" +#include "ATen/ops/floor_divide.h" +#include "ATen/ops/fmax.h" +#include "ATen/ops/fmin.h" +#include "ATen/ops/fmod.h" +#include "ATen/ops/frac.h" +#include "ATen/ops/fractional_max_pool2d.h" +#include "ATen/ops/fractional_max_pool2d_backward.h" +#include "ATen/ops/fractional_max_pool3d.h" +#include "ATen/ops/fractional_max_pool3d_backward.h" +#include "ATen/ops/frexp.h" +#include "ATen/ops/frobenius_norm.h" +#include "ATen/ops/from_blob.h" +#include "ATen/ops/from_file.h" +#include "ATen/ops/full.h" +#include "ATen/ops/full_like.h" +#include "ATen/ops/fused_moving_avg_obs_fake_quant.h" +#include "ATen/ops/gather.h" +#include "ATen/ops/gather_backward.h" +#include "ATen/ops/gcd.h" +#include "ATen/ops/ge.h" +#include "ATen/ops/gelu.h" +#include "ATen/ops/gelu_backward.h" +#include "ATen/ops/geometric.h" +#include "ATen/ops/geqrf.h" +#include "ATen/ops/ger.h" +#include "ATen/ops/glu.h" +#include "ATen/ops/glu_backward.h" +#include "ATen/ops/glu_backward_jvp.h" +#include "ATen/ops/glu_jvp.h" +#include "ATen/ops/gradient.h" +#include "ATen/ops/greater.h" +#include "ATen/ops/greater_equal.h" +#include "ATen/ops/grid_sampler.h" +#include "ATen/ops/grid_sampler_2d.h" +#include "ATen/ops/grid_sampler_2d_backward.h" +#include "ATen/ops/grid_sampler_3d.h" +#include "ATen/ops/grid_sampler_3d_backward.h" +#include "ATen/ops/group_norm.h" +#include "ATen/ops/gru.h" +#include "ATen/ops/gru_cell.h" +#include "ATen/ops/gt.h" +#include "ATen/ops/hamming_window.h" +#include "ATen/ops/hann_window.h" +#include "ATen/ops/hardshrink.h" +#include "ATen/ops/hardshrink_backward.h" +#include "ATen/ops/hardsigmoid.h" +#include "ATen/ops/hardsigmoid_backward.h" +#include "ATen/ops/hardswish.h" +#include "ATen/ops/hardswish_backward.h" +#include "ATen/ops/hardtanh.h" +#include "ATen/ops/hardtanh_backward.h" +#include "ATen/ops/heaviside.h" +#include "ATen/ops/hinge_embedding_loss.h" +#include "ATen/ops/histc.h" +#include "ATen/ops/histogram.h" +#include "ATen/ops/histogramdd.h" +#include "ATen/ops/hsplit.h" +#include 
"ATen/ops/hspmm.h" +#include "ATen/ops/hstack.h" +#include "ATen/ops/huber_loss.h" +#include "ATen/ops/huber_loss_backward.h" +#include "ATen/ops/hypot.h" +#include "ATen/ops/i0.h" +#include "ATen/ops/igamma.h" +#include "ATen/ops/igammac.h" +#include "ATen/ops/im2col.h" +#include "ATen/ops/imag.h" +#include "ATen/ops/index.h" +#include "ATen/ops/index_add.h" +#include "ATen/ops/index_copy.h" +#include "ATen/ops/index_fill.h" +#include "ATen/ops/index_put.h" +#include "ATen/ops/index_reduce.h" +#include "ATen/ops/index_select.h" +#include "ATen/ops/index_select_backward.h" +#include "ATen/ops/indices.h" +#include "ATen/ops/indices_copy.h" +#include "ATen/ops/infinitely_differentiable_gelu_backward.h" +#include "ATen/ops/inner.h" +#include "ATen/ops/instance_norm.h" +#include "ATen/ops/int_repr.h" +#include "ATen/ops/inverse.h" +#include "ATen/ops/is_coalesced.h" +#include "ATen/ops/is_complex.h" +#include "ATen/ops/is_conj.h" +#include "ATen/ops/is_distributed.h" +#include "ATen/ops/is_floating_point.h" +#include "ATen/ops/is_inference.h" +#include "ATen/ops/is_leaf.h" +#include "ATen/ops/is_neg.h" +#include "ATen/ops/is_nonzero.h" +#include "ATen/ops/is_pinned.h" +#include "ATen/ops/is_same_size.h" +#include "ATen/ops/is_set_to.h" +#include "ATen/ops/is_signed.h" +#include "ATen/ops/is_vulkan_available.h" +#include "ATen/ops/isclose.h" +#include "ATen/ops/isfinite.h" +#include "ATen/ops/isin.h" +#include "ATen/ops/isinf.h" +#include "ATen/ops/isnan.h" +#include "ATen/ops/isneginf.h" +#include "ATen/ops/isposinf.h" +#include "ATen/ops/isreal.h" +#include "ATen/ops/istft.h" +#include "ATen/ops/item.h" +#include "ATen/ops/kaiser_window.h" +#include "ATen/ops/kl_div.h" +#include "ATen/ops/kron.h" +#include "ATen/ops/kthvalue.h" +#include "ATen/ops/l1_loss.h" +#include "ATen/ops/layer_norm.h" +#include "ATen/ops/lcm.h" +#include "ATen/ops/ldexp.h" +#include "ATen/ops/le.h" +#include "ATen/ops/leaky_relu.h" +#include "ATen/ops/leaky_relu_backward.h" +#include "ATen/ops/lerp.h" +#include "ATen/ops/less.h" +#include "ATen/ops/less_equal.h" +#include "ATen/ops/lgamma.h" +#include "ATen/ops/lift.h" +#include "ATen/ops/lift_fresh.h" +#include "ATen/ops/lift_fresh_copy.h" +#include "ATen/ops/linalg_cholesky.h" +#include "ATen/ops/linalg_cholesky_ex.h" +#include "ATen/ops/linalg_cond.h" +#include "ATen/ops/linalg_cross.h" +#include "ATen/ops/linalg_det.h" +#include "ATen/ops/linalg_diagonal.h" +#include "ATen/ops/linalg_eig.h" +#include "ATen/ops/linalg_eigh.h" +#include "ATen/ops/linalg_eigvals.h" +#include "ATen/ops/linalg_eigvalsh.h" +#include "ATen/ops/linalg_householder_product.h" +#include "ATen/ops/linalg_inv.h" +#include "ATen/ops/linalg_inv_ex.h" +#include "ATen/ops/linalg_ldl_factor.h" +#include "ATen/ops/linalg_ldl_factor_ex.h" +#include "ATen/ops/linalg_ldl_solve.h" +#include "ATen/ops/linalg_lstsq.h" +#include "ATen/ops/linalg_lu.h" +#include "ATen/ops/linalg_lu_factor.h" +#include "ATen/ops/linalg_lu_factor_ex.h" +#include "ATen/ops/linalg_lu_solve.h" +#include "ATen/ops/linalg_matmul.h" +#include "ATen/ops/linalg_matrix_exp.h" +#include "ATen/ops/linalg_matrix_norm.h" +#include "ATen/ops/linalg_matrix_power.h" +#include "ATen/ops/linalg_matrix_rank.h" +#include "ATen/ops/linalg_multi_dot.h" +#include "ATen/ops/linalg_norm.h" +#include "ATen/ops/linalg_pinv.h" +#include "ATen/ops/linalg_qr.h" +#include "ATen/ops/linalg_slogdet.h" +#include "ATen/ops/linalg_solve.h" +#include "ATen/ops/linalg_solve_ex.h" +#include "ATen/ops/linalg_solve_triangular.h" +#include "ATen/ops/linalg_svd.h" 
+#include "ATen/ops/linalg_svdvals.h" +#include "ATen/ops/linalg_tensorinv.h" +#include "ATen/ops/linalg_tensorsolve.h" +#include "ATen/ops/linalg_vander.h" +#include "ATen/ops/linalg_vecdot.h" +#include "ATen/ops/linalg_vector_norm.h" +#include "ATen/ops/linear.h" +#include "ATen/ops/linear_backward.h" +#include "ATen/ops/linspace.h" +#include "ATen/ops/log.h" +#include "ATen/ops/log10.h" +#include "ATen/ops/log1p.h" +#include "ATen/ops/log2.h" +#include "ATen/ops/log_normal.h" +#include "ATen/ops/log_sigmoid.h" +#include "ATen/ops/log_sigmoid_backward.h" +#include "ATen/ops/log_sigmoid_forward.h" +#include "ATen/ops/log_softmax.h" +#include "ATen/ops/logaddexp.h" +#include "ATen/ops/logaddexp2.h" +#include "ATen/ops/logcumsumexp.h" +#include "ATen/ops/logdet.h" +#include "ATen/ops/logical_and.h" +#include "ATen/ops/logical_not.h" +#include "ATen/ops/logical_or.h" +#include "ATen/ops/logical_xor.h" +#include "ATen/ops/logit.h" +#include "ATen/ops/logit_backward.h" +#include "ATen/ops/logspace.h" +#include "ATen/ops/logsumexp.h" +#include "ATen/ops/lshift.h" +#include "ATen/ops/lstm.h" +#include "ATen/ops/lstm_cell.h" +#include "ATen/ops/lstm_mps_backward.h" +#include "ATen/ops/lt.h" +#include "ATen/ops/lu_solve.h" +#include "ATen/ops/lu_unpack.h" +#include "ATen/ops/mH.h" +#include "ATen/ops/mT.h" +#include "ATen/ops/margin_ranking_loss.h" +#include "ATen/ops/masked_fill.h" +#include "ATen/ops/masked_scatter.h" +#include "ATen/ops/masked_select.h" +#include "ATen/ops/masked_select_backward.h" +#include "ATen/ops/matmul.h" +#include "ATen/ops/matmul_backward.h" +#include "ATen/ops/matrix_H.h" +#include "ATen/ops/matrix_exp.h" +#include "ATen/ops/matrix_exp_backward.h" +#include "ATen/ops/matrix_power.h" +#include "ATen/ops/max.h" +#include "ATen/ops/max_pool1d.h" +#include "ATen/ops/max_pool1d_with_indices.h" +#include "ATen/ops/max_pool2d.h" +#include "ATen/ops/max_pool2d_backward.h" +#include "ATen/ops/max_pool2d_with_indices.h" +#include "ATen/ops/max_pool2d_with_indices_backward.h" +#include "ATen/ops/max_pool3d.h" +#include "ATen/ops/max_pool3d_with_indices.h" +#include "ATen/ops/max_pool3d_with_indices_backward.h" +#include "ATen/ops/max_unpool2d.h" +#include "ATen/ops/max_unpool3d.h" +#include "ATen/ops/maximum.h" +#include "ATen/ops/mean.h" +#include "ATen/ops/median.h" +#include "ATen/ops/meshgrid.h" +#include "ATen/ops/min.h" +#include "ATen/ops/minimum.h" +#include "ATen/ops/miopen_batch_norm.h" +#include "ATen/ops/miopen_batch_norm_backward.h" +#include "ATen/ops/miopen_convolution.h" +#include "ATen/ops/miopen_convolution_add_relu.h" +#include "ATen/ops/miopen_convolution_relu.h" +#include "ATen/ops/miopen_convolution_transpose.h" +#include "ATen/ops/miopen_depthwise_convolution.h" +#include "ATen/ops/miopen_rnn.h" +#include "ATen/ops/miopen_rnn_backward.h" +#include "ATen/ops/mish.h" +#include "ATen/ops/mish_backward.h" +#include "ATen/ops/mkldnn_adaptive_avg_pool2d.h" +#include "ATen/ops/mkldnn_adaptive_avg_pool2d_backward.h" +#include "ATen/ops/mkldnn_convolution.h" +#include "ATen/ops/mkldnn_linear.h" +#include "ATen/ops/mkldnn_linear_backward.h" +#include "ATen/ops/mkldnn_linear_backward_input.h" +#include "ATen/ops/mkldnn_linear_backward_weights.h" +#include "ATen/ops/mkldnn_max_pool2d.h" +#include "ATen/ops/mkldnn_max_pool2d_backward.h" +#include "ATen/ops/mkldnn_max_pool3d.h" +#include "ATen/ops/mkldnn_max_pool3d_backward.h" +#include "ATen/ops/mkldnn_reorder_conv2d_weight.h" +#include "ATen/ops/mkldnn_reorder_conv3d_weight.h" +#include "ATen/ops/mkldnn_rnn_layer.h" 
+#include "ATen/ops/mkldnn_rnn_layer_backward.h" +#include "ATen/ops/mm.h" +#include "ATen/ops/mode.h" +#include "ATen/ops/moveaxis.h" +#include "ATen/ops/movedim.h" +#include "ATen/ops/mps_convolution_backward.h" +#include "ATen/ops/mps_convolution_transpose_backward.h" +#include "ATen/ops/mse_loss.h" +#include "ATen/ops/mse_loss_backward.h" +#include "ATen/ops/msort.h" +#include "ATen/ops/mul.h" +#include "ATen/ops/multi_margin_loss.h" +#include "ATen/ops/multi_margin_loss_backward.h" +#include "ATen/ops/multilabel_margin_loss.h" +#include "ATen/ops/multilabel_margin_loss_backward.h" +#include "ATen/ops/multilabel_margin_loss_forward.h" +#include "ATen/ops/multinomial.h" +#include "ATen/ops/multiply.h" +#include "ATen/ops/mv.h" +#include "ATen/ops/mvlgamma.h" +#include "ATen/ops/nan_to_num.h" +#include "ATen/ops/nanmean.h" +#include "ATen/ops/nanmedian.h" +#include "ATen/ops/nanquantile.h" +#include "ATen/ops/nansum.h" +#include "ATen/ops/narrow.h" +#include "ATen/ops/narrow_copy.h" +#include "ATen/ops/native_batch_norm.h" +#include "ATen/ops/native_batch_norm_backward.h" +#include "ATen/ops/native_channel_shuffle.h" +#include "ATen/ops/native_dropout.h" +#include "ATen/ops/native_dropout_backward.h" +#include "ATen/ops/native_group_norm.h" +#include "ATen/ops/native_group_norm_backward.h" +#include "ATen/ops/native_layer_norm.h" +#include "ATen/ops/native_layer_norm_backward.h" +#include "ATen/ops/native_norm.h" +#include "ATen/ops/ne.h" +#include "ATen/ops/neg.h" +#include "ATen/ops/negative.h" +#include "ATen/ops/nested_to_padded_tensor.h" +#include "ATen/ops/new_empty.h" +#include "ATen/ops/new_empty_strided.h" +#include "ATen/ops/new_full.h" +#include "ATen/ops/new_ones.h" +#include "ATen/ops/new_zeros.h" +#include "ATen/ops/nextafter.h" +#include "ATen/ops/nll_loss.h" +#include "ATen/ops/nll_loss2d.h" +#include "ATen/ops/nll_loss2d_backward.h" +#include "ATen/ops/nll_loss2d_forward.h" +#include "ATen/ops/nll_loss_backward.h" +#include "ATen/ops/nll_loss_forward.h" +#include "ATen/ops/nll_loss_nd.h" +#include "ATen/ops/nonzero.h" +#include "ATen/ops/nonzero_numpy.h" +#include "ATen/ops/norm.h" +#include "ATen/ops/norm_except_dim.h" +#include "ATen/ops/normal.h" +#include "ATen/ops/not_equal.h" +#include "ATen/ops/nuclear_norm.h" +#include "ATen/ops/numpy_T.h" +#include "ATen/ops/one_hot.h" +#include "ATen/ops/ones.h" +#include "ATen/ops/ones_like.h" +#include "ATen/ops/or.h" +#include "ATen/ops/orgqr.h" +#include "ATen/ops/ormqr.h" +#include "ATen/ops/outer.h" +#include "ATen/ops/output_nr.h" +#include "ATen/ops/pad.h" +#include "ATen/ops/pad_sequence.h" +#include "ATen/ops/pairwise_distance.h" +#include "ATen/ops/pdist.h" +#include "ATen/ops/permute.h" +#include "ATen/ops/permute_copy.h" +#include "ATen/ops/pin_memory.h" +#include "ATen/ops/pinverse.h" +#include "ATen/ops/pixel_shuffle.h" +#include "ATen/ops/pixel_unshuffle.h" +#include "ATen/ops/poisson.h" +#include "ATen/ops/poisson_nll_loss.h" +#include "ATen/ops/polar.h" +#include "ATen/ops/polygamma.h" +#include "ATen/ops/positive.h" +#include "ATen/ops/pow.h" +#include "ATen/ops/prelu.h" +#include "ATen/ops/prod.h" +#include "ATen/ops/promote_types.h" +#include "ATen/ops/put.h" +#include "ATen/ops/q_per_channel_axis.h" +#include "ATen/ops/q_per_channel_scales.h" +#include "ATen/ops/q_per_channel_zero_points.h" +#include "ATen/ops/q_scale.h" +#include "ATen/ops/q_zero_point.h" +#include "ATen/ops/qr.h" +#include "ATen/ops/qscheme.h" +#include "ATen/ops/quantile.h" +#include "ATen/ops/quantize_per_channel.h" +#include 
"ATen/ops/quantize_per_tensor.h" +#include "ATen/ops/quantize_per_tensor_dynamic.h" +#include "ATen/ops/quantized_batch_norm.h" +#include "ATen/ops/quantized_gru_cell.h" +#include "ATen/ops/quantized_lstm_cell.h" +#include "ATen/ops/quantized_max_pool1d.h" +#include "ATen/ops/quantized_max_pool2d.h" +#include "ATen/ops/quantized_rnn_relu_cell.h" +#include "ATen/ops/quantized_rnn_tanh_cell.h" +#include "ATen/ops/rad2deg.h" +#include "ATen/ops/rand.h" +#include "ATen/ops/rand_like.h" +#include "ATen/ops/randint.h" +#include "ATen/ops/randint_like.h" +#include "ATen/ops/randn.h" +#include "ATen/ops/randn_like.h" +#include "ATen/ops/random.h" +#include "ATen/ops/randperm.h" +#include "ATen/ops/range.h" +#include "ATen/ops/ravel.h" +#include "ATen/ops/real.h" +#include "ATen/ops/reciprocal.h" +#include "ATen/ops/record_stream.h" +#include "ATen/ops/refine_names.h" +#include "ATen/ops/reflection_pad1d.h" +#include "ATen/ops/reflection_pad1d_backward.h" +#include "ATen/ops/reflection_pad2d.h" +#include "ATen/ops/reflection_pad2d_backward.h" +#include "ATen/ops/reflection_pad3d.h" +#include "ATen/ops/reflection_pad3d_backward.h" +#include "ATen/ops/relu.h" +#include "ATen/ops/relu6.h" +#include "ATen/ops/remainder.h" +#include "ATen/ops/rename.h" +#include "ATen/ops/renorm.h" +#include "ATen/ops/repeat.h" +#include "ATen/ops/repeat_interleave.h" +#include "ATen/ops/replication_pad1d.h" +#include "ATen/ops/replication_pad1d_backward.h" +#include "ATen/ops/replication_pad2d.h" +#include "ATen/ops/replication_pad2d_backward.h" +#include "ATen/ops/replication_pad3d.h" +#include "ATen/ops/replication_pad3d_backward.h" +#include "ATen/ops/requires_grad.h" +#include "ATen/ops/reshape.h" +#include "ATen/ops/reshape_as.h" +#include "ATen/ops/resize.h" +#include "ATen/ops/resize_as.h" +#include "ATen/ops/resize_as_sparse.h" +#include "ATen/ops/resolve_conj.h" +#include "ATen/ops/resolve_neg.h" +#include "ATen/ops/result_type.h" +#include "ATen/ops/retain_grad.h" +#include "ATen/ops/retains_grad.h" +#include "ATen/ops/rnn_relu.h" +#include "ATen/ops/rnn_relu_cell.h" +#include "ATen/ops/rnn_tanh.h" +#include "ATen/ops/rnn_tanh_cell.h" +#include "ATen/ops/roll.h" +#include "ATen/ops/rot90.h" +#include "ATen/ops/round.h" +#include "ATen/ops/row_indices.h" +#include "ATen/ops/row_indices_copy.h" +#include "ATen/ops/row_stack.h" +#include "ATen/ops/rrelu.h" +#include "ATen/ops/rrelu_with_noise.h" +#include "ATen/ops/rrelu_with_noise_backward.h" +#include "ATen/ops/rshift.h" +#include "ATen/ops/rsqrt.h" +#include "ATen/ops/rsub.h" +#include "ATen/ops/scalar_tensor.h" +#include "ATen/ops/scaled_dot_product_attention.h" +#include "ATen/ops/scatter.h" +#include "ATen/ops/scatter_add.h" +#include "ATen/ops/scatter_reduce.h" +#include "ATen/ops/searchsorted.h" +#include "ATen/ops/segment_reduce.h" +#include "ATen/ops/select.h" +#include "ATen/ops/select_backward.h" +#include "ATen/ops/select_copy.h" +#include "ATen/ops/select_scatter.h" +#include "ATen/ops/selu.h" +#include "ATen/ops/set.h" +#include "ATen/ops/set_data.h" +#include "ATen/ops/sgn.h" +#include "ATen/ops/sigmoid.h" +#include "ATen/ops/sigmoid_backward.h" +#include "ATen/ops/sign.h" +#include "ATen/ops/signbit.h" +#include "ATen/ops/silu.h" +#include "ATen/ops/silu_backward.h" +#include "ATen/ops/sin.h" +#include "ATen/ops/sinc.h" +#include "ATen/ops/sinh.h" +#include "ATen/ops/size.h" +#include "ATen/ops/slice.h" +#include "ATen/ops/slice_backward.h" +#include "ATen/ops/slice_copy.h" +#include "ATen/ops/slice_scatter.h" +#include "ATen/ops/slogdet.h" 
+#include "ATen/ops/slow_conv3d.h" +#include "ATen/ops/slow_conv3d_forward.h" +#include "ATen/ops/slow_conv_dilated2d.h" +#include "ATen/ops/slow_conv_dilated3d.h" +#include "ATen/ops/slow_conv_transpose2d.h" +#include "ATen/ops/slow_conv_transpose3d.h" +#include "ATen/ops/smm.h" +#include "ATen/ops/smooth_l1_loss.h" +#include "ATen/ops/smooth_l1_loss_backward.h" +#include "ATen/ops/soft_margin_loss.h" +#include "ATen/ops/soft_margin_loss_backward.h" +#include "ATen/ops/softmax.h" +#include "ATen/ops/softplus.h" +#include "ATen/ops/softplus_backward.h" +#include "ATen/ops/softshrink.h" +#include "ATen/ops/softshrink_backward.h" +#include "ATen/ops/sort.h" +#include "ATen/ops/sparse_bsc_tensor.h" +#include "ATen/ops/sparse_bsr_tensor.h" +#include "ATen/ops/sparse_compressed_tensor.h" +#include "ATen/ops/sparse_coo_tensor.h" +#include "ATen/ops/sparse_csc_tensor.h" +#include "ATen/ops/sparse_csr_tensor.h" +#include "ATen/ops/sparse_dim.h" +#include "ATen/ops/sparse_mask.h" +#include "ATen/ops/sparse_resize.h" +#include "ATen/ops/sparse_resize_and_clear.h" +#include "ATen/ops/sparse_sampled_addmm.h" +#include "ATen/ops/special_airy_ai.h" +#include "ATen/ops/special_bessel_j0.h" +#include "ATen/ops/special_bessel_j1.h" +#include "ATen/ops/special_bessel_y0.h" +#include "ATen/ops/special_bessel_y1.h" +#include "ATen/ops/special_chebyshev_polynomial_t.h" +#include "ATen/ops/special_chebyshev_polynomial_u.h" +#include "ATen/ops/special_chebyshev_polynomial_v.h" +#include "ATen/ops/special_chebyshev_polynomial_w.h" +#include "ATen/ops/special_digamma.h" +#include "ATen/ops/special_entr.h" +#include "ATen/ops/special_erf.h" +#include "ATen/ops/special_erfc.h" +#include "ATen/ops/special_erfcx.h" +#include "ATen/ops/special_erfinv.h" +#include "ATen/ops/special_exp2.h" +#include "ATen/ops/special_expit.h" +#include "ATen/ops/special_expm1.h" +#include "ATen/ops/special_gammainc.h" +#include "ATen/ops/special_gammaincc.h" +#include "ATen/ops/special_gammaln.h" +#include "ATen/ops/special_hermite_polynomial_h.h" +#include "ATen/ops/special_hermite_polynomial_he.h" +#include "ATen/ops/special_i0.h" +#include "ATen/ops/special_i0e.h" +#include "ATen/ops/special_i1.h" +#include "ATen/ops/special_i1e.h" +#include "ATen/ops/special_laguerre_polynomial_l.h" +#include "ATen/ops/special_legendre_polynomial_p.h" +#include "ATen/ops/special_log1p.h" +#include "ATen/ops/special_log_ndtr.h" +#include "ATen/ops/special_log_softmax.h" +#include "ATen/ops/special_logit.h" +#include "ATen/ops/special_logsumexp.h" +#include "ATen/ops/special_modified_bessel_i0.h" +#include "ATen/ops/special_modified_bessel_i1.h" +#include "ATen/ops/special_modified_bessel_k0.h" +#include "ATen/ops/special_modified_bessel_k1.h" +#include "ATen/ops/special_multigammaln.h" +#include "ATen/ops/special_ndtr.h" +#include "ATen/ops/special_ndtri.h" +#include "ATen/ops/special_polygamma.h" +#include "ATen/ops/special_psi.h" +#include "ATen/ops/special_round.h" +#include "ATen/ops/special_scaled_modified_bessel_k0.h" +#include "ATen/ops/special_scaled_modified_bessel_k1.h" +#include "ATen/ops/special_shifted_chebyshev_polynomial_t.h" +#include "ATen/ops/special_shifted_chebyshev_polynomial_u.h" +#include "ATen/ops/special_shifted_chebyshev_polynomial_v.h" +#include "ATen/ops/special_shifted_chebyshev_polynomial_w.h" +#include "ATen/ops/special_sinc.h" +#include "ATen/ops/special_softmax.h" +#include "ATen/ops/special_spherical_bessel_j0.h" +#include "ATen/ops/special_xlog1py.h" +#include "ATen/ops/special_xlogy.h" +#include 
"ATen/ops/special_zeta.h" +#include "ATen/ops/split.h" +#include "ATen/ops/split_copy.h" +#include "ATen/ops/split_with_sizes.h" +#include "ATen/ops/split_with_sizes_copy.h" +#include "ATen/ops/sqrt.h" +#include "ATen/ops/square.h" +#include "ATen/ops/squeeze.h" +#include "ATen/ops/squeeze_copy.h" +#include "ATen/ops/sspaddmm.h" +#include "ATen/ops/stack.h" +#include "ATen/ops/std.h" +#include "ATen/ops/std_mean.h" +#include "ATen/ops/stft.h" +#include "ATen/ops/stride.h" +#include "ATen/ops/sub.h" +#include "ATen/ops/subtract.h" +#include "ATen/ops/sum.h" +#include "ATen/ops/sum_to_size.h" +#include "ATen/ops/svd.h" +#include "ATen/ops/swapaxes.h" +#include "ATen/ops/swapdims.h" +#include "ATen/ops/t.h" +#include "ATen/ops/t_copy.h" +#include "ATen/ops/take.h" +#include "ATen/ops/take_along_dim.h" +#include "ATen/ops/tan.h" +#include "ATen/ops/tanh.h" +#include "ATen/ops/tanh_backward.h" +#include "ATen/ops/tensor_split.h" +#include "ATen/ops/tensor.h" +#include "ATen/ops/tensordot.h" +#include "ATen/ops/thnn_conv2d.h" +#include "ATen/ops/threshold.h" +#include "ATen/ops/threshold_backward.h" +#include "ATen/ops/tile.h" +#include "ATen/ops/to.h" +#include "ATen/ops/to_dense.h" +#include "ATen/ops/to_dense_backward.h" +#include "ATen/ops/to_mkldnn.h" +#include "ATen/ops/to_mkldnn_backward.h" +#include "ATen/ops/to_padded_tensor.h" +#include "ATen/ops/to_sparse.h" +#include "ATen/ops/to_sparse_bsc.h" +#include "ATen/ops/to_sparse_bsr.h" +#include "ATen/ops/to_sparse_csc.h" +#include "ATen/ops/to_sparse_csr.h" +#include "ATen/ops/topk.h" +#include "ATen/ops/trace.h" +#include "ATen/ops/trace_backward.h" +#include "ATen/ops/transpose.h" +#include "ATen/ops/transpose_copy.h" +#include "ATen/ops/trapezoid.h" +#include "ATen/ops/trapz.h" +#include "ATen/ops/triangular_solve.h" +#include "ATen/ops/tril.h" +#include "ATen/ops/tril_indices.h" +#include "ATen/ops/triplet_margin_loss.h" +#include "ATen/ops/triu.h" +#include "ATen/ops/triu_indices.h" +#include "ATen/ops/true_divide.h" +#include "ATen/ops/trunc.h" +#include "ATen/ops/type_as.h" +#include "ATen/ops/unbind.h" +#include "ATen/ops/unbind_copy.h" +#include "ATen/ops/unflatten.h" +#include "ATen/ops/unflatten_dense_tensors.h" +#include "ATen/ops/unfold.h" +#include "ATen/ops/unfold_backward.h" +#include "ATen/ops/unfold_copy.h" +#include "ATen/ops/uniform.h" +#include "ATen/ops/unique_consecutive.h" +#include "ATen/ops/unique_dim.h" +#include "ATen/ops/unique_dim_consecutive.h" +#include "ATen/ops/unsafe_chunk.h" +#include "ATen/ops/unsafe_split.h" +#include "ATen/ops/unsafe_split_with_sizes.h" +#include "ATen/ops/unsqueeze.h" +#include "ATen/ops/unsqueeze_copy.h" +#include "ATen/ops/upsample_bicubic2d.h" +#include "ATen/ops/upsample_bicubic2d_backward.h" +#include "ATen/ops/upsample_bilinear2d.h" +#include "ATen/ops/upsample_bilinear2d_backward.h" +#include "ATen/ops/upsample_linear1d.h" +#include "ATen/ops/upsample_linear1d_backward.h" +#include "ATen/ops/upsample_nearest1d.h" +#include "ATen/ops/upsample_nearest1d_backward.h" +#include "ATen/ops/upsample_nearest2d.h" +#include "ATen/ops/upsample_nearest2d_backward.h" +#include "ATen/ops/upsample_nearest3d.h" +#include "ATen/ops/upsample_nearest3d_backward.h" +#include "ATen/ops/upsample_trilinear3d.h" +#include "ATen/ops/upsample_trilinear3d_backward.h" +#include "ATen/ops/value_selecting_reduction_backward.h" +#include "ATen/ops/values.h" +#include "ATen/ops/values_copy.h" +#include "ATen/ops/vander.h" +#include "ATen/ops/var.h" +#include "ATen/ops/var_mean.h" +#include "ATen/ops/vdot.h" 
+#include "ATen/ops/view.h" +#include "ATen/ops/view_as.h" +#include "ATen/ops/view_as_complex.h" +#include "ATen/ops/view_as_complex_copy.h" +#include "ATen/ops/view_as_real.h" +#include "ATen/ops/view_as_real_copy.h" +#include "ATen/ops/view_copy.h" +#include "ATen/ops/vsplit.h" +#include "ATen/ops/vstack.h" +#include "ATen/ops/where.h" +#include "ATen/ops/xlogy.h" +#include "ATen/ops/xor.h" +#include "ATen/ops/zero.h" +#include "ATen/ops/zeros.h" +#include "ATen/ops/zeros_like.h" +#include "ATen/Functions.h" +#include "ATen/ExpandUtils.h" +#include "ATen/MemoryOverlap.h" +#include "ATen/NestedTensorImpl.h" +#include "torch/csrc/autograd/input_metadata.h" +#include "torch/csrc/autograd/saved_variable_hooks.h" +#include "torch/csrc/autograd/saved_variable.h" +#include "ATen/core/Variadic.h" +#include "torch/csrc/utils/variadic.h" +#include "ATen/SequenceNumber.h" +#include "torch/csrc/autograd/function.h" +#include "torch/csrc/autograd/custom_function.h" +#include "torch/autograd.h" +#include "torch/cuda.h" +#include "torch/arg.h" +#include "ATen/Device.h" +#include "ATen/Dispatch.h" +#include "ATen/ScalarOps.h" +#include "c10/util/strides.h" +#include "ATen/TensorMeta.h" +#include "ATen/core/Range.h" +#include "c10/util/Load.h" +#include "c10/core/DynamicCast.h" +#include "ATen/TensorIterator.h" +#include "ATen/NativeFunctions.h" +#include "ATen/TensorIndexing.h" +#include "ATen/TensorOperators.h" +#include "ATen/Version.h" +#include "ATen/core/Scalar.h" +#include "ATen/core/UnsafeFromTH.h" +#include "ATen/ATen.h" +#include "torch/csrc/api/include/torch/detail/TensorDataContainer.h" +#include "torch/csrc/autograd/generated/variable_factories.h" +#include "c10/core/PyHandleCache.h" +#include "c10/util/Bitset.h" +#include "ATen/core/dispatch/DispatchKeyExtractor.h" +#include "ATen/core/dispatch/OperatorEntry.h" +#include "c10/util/Synchronized.h" +// #include "c10/util/LeftRight.h" // Not in API +#include "ATen/core/dispatch/Dispatcher.h" +#include "torch/types.h" +#include "torch/data/dataloader_options.h" +#include "torch/data/detail/queue.h" +#include "torch/data/detail/data_shuttle.h" +#include "torch/data/detail/sequencers.h" +#include "torch/data/iterator.h" +#include "torch/data/samplers/base.h" +#include "torch/data/samplers/random.h" +#include "torch/data/worker_exception.h" +#include "torch/csrc/utils/memory.h" +#include "torch/data/dataloader/base.h" +#include "torch/data/dataloader/stateful.h" +#include "torch/data/dataloader/stateless.h" +#include "torch/data/dataloader.h" +#include "torch/data/example.h" +#include "torch/data/datasets/base.h" +#include "torch/data/datasets/stateful.h" +#include "torch/data/samplers/custom_batch_request.h" +#include "torch/data/samplers/distributed.h" +#include "torch/data/samplers/sequential.h" +#include "torch/csrc/api/include/torch/imethod.h" +#include "torch/csrc/jit/ir/attributes.h" +#include "torch/csrc/jit/ir/graph_node_list.h" +#include "torch/csrc/jit/frontend/source_range.h" +#include "torch/csrc/jit/ir/scope.h" +#include "torch/csrc/jit/ir/constants.h" +#include "torch/csrc/jit/ir/named_value.h" +#include "torch/csrc/jit/runtime/operator_options.h" +#include "torch/csrc/jit/runtime/operator.h" +#include "torch/csrc/utils/schema_info.h" +#include "ATen/core/enum_type.h" +// #include "ATen/core/aten_interned_strings.h" // Internal only +// #include "ATen/core/interned_strings.h" // Internal only +#include "torch/csrc/jit/ir/ir.h" +#include "torch/csrc/jit/python/update_graph_executor_opt.h" +#include 
"torch/csrc/jit/runtime/argument_spec.h" +#include "torch/csrc/jit/runtime/interpreter.h" +#include "torch/csrc/jit/runtime/variable_tensor_list.h" +#include "torch/csrc/jit/runtime/graph_executor.h" +#include "torch/csrc/jit/api/function_impl.h" +#include "torch/csrc/jit/api/method.h" +#include "torch/csrc/jit/api/object.h" +#include "torch/csrc/api/include/torch/ordered_dict.h" +#include "torch/csrc/jit/frontend/name_mangler.h" +#include "torch/csrc/jit/api/compilation_unit.h" +#include "torch/csrc/jit/api/module.h" +#include "torch/serialize/input-archive.h" +#include "torch/serialize/output-archive.h" +#include "torch/serialize/archive.h" +#include "torch/data/samplers/serialize.h" +#include "torch/data/samplers/stream.h" +#include "torch/data/samplers.h" +#include "torch/serialize/tensor.h" +#include "torch/serialize.h" +#include "torch/data/datasets/chunk.h" +#include "torch/data/datasets/map.h" +#include "torch/data/datasets/mnist.h" +#include "torch/data/datasets/shared.h" +#include "torch/data/datasets/tensor.h" +#include "torch/data/datasets.h" +#include "torch/data/transforms/base.h" +#include "torch/data/transforms/lambda.h" +#include "torch/data/transforms/collate.h" +#include "torch/data/transforms/stack.h" +#include "torch/data/transforms/tensor.h" +#include "torch/data/transforms.h" +#include "torch/data.h" +#include "torch/enum.h" +#include "torch/fft.h" +#include "torch/jit.h" +#include "torch/linalg.h" +#include "torch/nested.h" +#include "torch/detail/static.h" +#include "torch/csrc/api/include/torch/nn/pimpl-inl.h" +#include "torch/nn/pimpl.h" +#include "torch/nn/modules/container/any_value.h" +#include "torch/nn/modules/container/any_module_holder.h" +#include "torch/ordered_dict.h" +#include "torch/nn/module.h" +#include "ATen/Config.h" +// #include "ATen/ParallelOpenMP.h" // Internal only +// #include "ATen/ParallelNative.h" // Internal only +// #include "ATen/ParallelNativeTBB.h" // Internal only +#include "ATen/Parallel-inl.h" +#include "ATen/Parallel.h" +#include "torch/csrc/api/include/torch/types.h" +#include "torch/csrc/profiler/orchestration/observer.h" +#include "torch/csrc/profiler/api.h" +#include "torch/csrc/profiler/events.h" +// #include "c10/util/strong_type.h" // Complex variadic templates non parseable +#include "torch/csrc/profiler/stubs/base.h" +#include "torch/csrc/profiler/util.h" +#include "torch/csrc/autograd/profiler_kineto.h" +// #include "torch/csrc/autograd/profiler_legacy.h" // Do not bother with legacy API +#include "torch/csrc/autograd/profiler.h" +#include "torch/utils.h" +#include "torch/nn/cloneable.h" +#include "torch/nn/options/batchnorm.h" +#include "torch/nn/functional/batchnorm.h" +// #include "torch/expanding_array.h" // Mapped to *Pointer +#include "torch/nn/options/conv.h" +#include "torch/nn/functional/conv.h" +#include "torch/nn/options/distance.h" +#include "torch/nn/functional/distance.h" +#include "torch/nn/options/dropout.h" +#include "torch/nn/functional/dropout.h" +#include "torch/nn/options/embedding.h" +#include "torch/nn/functional/embedding.h" +#include "torch/nn/options/fold.h" +#include "torch/nn/functional/fold.h" +#include "torch/nn/options/instancenorm.h" +#include "torch/nn/functional/instancenorm.h" +#include "torch/nn/functional/linear.h" +#include "torch/nn/options/activation.h" +#include "torch/nn/options/linear.h" +#include "torch/nn/functional/activation.h" +#include "torch/nn/options/loss.h" +#include "torch/nn/functional/loss.h" +#include "ATen/PadNd.h" +#include "torch/nn/options/padding.h" +#include 
"torch/nn/functional/padding.h" +#include "torch/nn/modules/utils.h" +#include "torch/nn/options/pooling.h" +#include "torch/nn/functional/pooling.h" +#include "torch/nn/options/normalization.h" +#include "torch/nn/functional/normalization.h" +#include "torch/nn/options/pixelshuffle.h" +#include "torch/nn/functional/pixelshuffle.h" +#include "torch/nn/options/upsampling.h" +#include "torch/nn/functional/upsampling.h" +#include "torch/nn/options/vision.h" +#include "torch/nn/functional/vision.h" +#include "torch/nn/functional.h" +#include "torch/nn/init.h" +#include "torch/nn/modules/common.h" +#include "torch/nn/modules/container/any.h" +// #include "torch/nn/modules/container/functional.h" // Complex variadic templates non parseable +#include "torch/nn/modules/container/moduledict.h" +#include "torch/nn/modules/container/modulelist.h" +#include "torch/nn/modules/container/named_any.h" +#include "torch/nn/modules/container/parameterdict.h" +#include "torch/nn/modules/container/parameterlist.h" +#include "torch/nn/modules/container/sequential.h" +#include "torch/nn/modules/linear.h" +#include "torch/nn/modules/activation.h" +#include "torch/nn/options/adaptive.h" +#include "torch/nn/modules/adaptive.h" +#include "torch/nn/modules/batchnorm.h" +// #include "c10/util/overloaded.h" // Non parseable +#include "torch/nn/modules/conv.h" +#include "torch/nn/modules/distance.h" +#include "torch/nn/modules/dropout.h" +#include "torch/nn/modules/embedding.h" +#include "torch/nn/modules/fold.h" +#include "torch/nn/modules/instancenorm.h" +#include "torch/nn/modules/loss.h" +#include "torch/nn/modules/_functions.h" +#include "torch/nn/modules/normalization.h" +#include "torch/nn/modules/padding.h" +#include "torch/nn/modules/pixelshuffle.h" +#include "torch/nn/modules/pooling.h" +#include "torch/nn/options/rnn.h" +#include "torch/nn/utils/rnn.h" +#include "torch/nn/modules/rnn.h" +#include "torch/nn/options/transformerlayer.h" +#include "torch/nn/options/transformer.h" +#include "torch/nn/modules/transformer.h" +#include "torch/nn/modules/transformerlayer.h" +#include "torch/nn/options/transformercoder.h" +#include "torch/nn/modules/transformercoder.h" +#include "torch/nn/modules/upsampling.h" +#include "torch/nn/modules.h" +#include "torch/nn/options.h" +#include "torch/nn/utils/clip_grad.h" +#include "torch/nn/utils/convert_parameters.h" +#include "torch/nn/utils.h" +#include "torch/nn.h" +#include "torch/optim/optimizer.h" +#include "torch/optim/serialize.h" +#include "torch/optim/adagrad.h" +#include "torch/optim/adam.h" +#include "torch/optim/adamw.h" +#include "torch/optim/lbfgs.h" +#include "torch/optim/rmsprop.h" +#include "torch/optim/sgd.h" +#include "torch/optim/schedulers/lr_scheduler.h" +#include "torch/optim/schedulers/step_lr.h" +#include "torch/optim.h" +#include "torch/sparse.h" +#include "torch/special.h" +#include "torch/version.h" +#include "torch/csrc/api/include/torch/all.h" + +// Included by +// ATen/native/TensorShape.h" +// torch/csrc/jit/serialization/storage_context.h" +// torch/csrc/jit/serialization/import.h" +#include "caffe2/serialize/inline_container.h" +#include "caffe2/serialize/istream_adapter.h" +#include "caffe2/serialize/read_adapter_interface.h" +#include "caffe2/serialize/versions.h" +#include "torch/csrc/jit/serialization/unpickler.h" +#include "torch/csrc/jit/frontend/script_type_parser.h" +#include "torch/csrc/jit/frontend/resolver.h" +#include "torch/csrc/jit/frontend/sugared_value.h" +#include "torch/csrc/jit/frontend/error_report.h" +#include 
"torch/csrc/jit/frontend/tree.h" +#include "torch/csrc/jit/frontend/lexer.h" +#include "torch/csrc/jit/frontend/parser_constants.h" +#include "torch/csrc/jit/frontend/strtod.h" +#include "torch/csrc/jit/frontend/schema_matching.h" +#include "torch/csrc/jit/frontend/versioned_symbols.h" +#include "torch/csrc/jit/frontend/tree_views.h" +#include "torch/csrc/jit/serialization/pickler.h" + +// Parsed and for inclusion in JNI +// See also https://github.com/pytorch/pytorch/blob/main/docs/cpp/source/Doxyfile +// for an approximation of what should be in API in addition to torch.h" +// torch/csrc/jit/runtime/custom_operator.h: Name conflict with torch::RegisterOperator + little chance to have any use +#include "torch/torch.h" +#include "ATen/native/TensorShape.h" +#include "torch/csrc/jit/serialization/storage_context.h" +#include "torch/csrc/jit/serialization/import.h"